Dataset columns (string lengths shown as min–max):
  hip_filename    string, 5–84
  hip_content     string, 79–9.69M
  cuda_filename   string, 4–83
  cuda_content    string, 19–9.69M
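Each row pairs an automatically hipified source file (hip_filename, hip_content) with the original CUDA file it was generated from (cuda_filename, cuda_content). As a rough illustration of the transformation the pairs capture, below is a minimal sketch using a hypothetical kernel named scale (not taken from the dataset): the CUDA triple-chevron launch in the cuda_content side corresponds to a hipLaunchKernelGGL call, an added hip/hip_runtime.h include, and cuda* -> hip* runtime API renames in the hip_content side, matching the pattern visible in the rows below.

// CUDA original (cuda_content side) -- hypothetical example, not a dataset row
__global__ void scale(float *x, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}
// Host-side launch and synchronization
scale<<<grid, block>>>(d_x, 2.0f, n);
cudaDeviceSynchronize();

// Hipified counterpart (hip_content side), as hipify typically emits it
#include "hip/hip_runtime.h"
hipLaunchKernelGGL(scale, dim3(grid), dim3(block), 0, 0, d_x, 2.0f, n);
hipDeviceSynchronize();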
282dea0f9bdcc15baa7c6e6b2c16a56f9ba2f651.hip
// !!! This is a file automatically generated by hipify!!! #include <thrust/device_ptr.h> #include <thrust/sort.h> //#include "hip/hip_runtime.h" //#include "hip/hip_runtime.h" #include "opencv2/gpu/device/common.hpp" #include "opencv2/gpu/device/utility.hpp" //#include "opencv2/core/cuda_devptrs.hpp" using namespace std; using namespace cv; using namespace cv::gpu; using namespace cv::gpu::device; texture<float, hipTextureType2D, hipReadModeElementType> eigTex(0, hipFilterModePoint, hipAddressModeClamp); __device__ uint g_counter = 0; template <class Mask> __global__ void findCorners(float threshold, const Mask mask, float2* corners, uint max_count, int rows, int cols) { const int j = blockIdx.x * blockDim.x + threadIdx.x; const int i = blockIdx.y * blockDim.y + threadIdx.y; if (i > 0 && i < rows - 1 && j > 0 && j < cols - 1 && mask(i, j)) { float val = tex2D(eigTex, j, i); if (val > threshold) { float maxVal = val; maxVal = ::fmax(tex2D(eigTex, j - 1, i - 1), maxVal); maxVal = ::fmax(tex2D(eigTex, j , i - 1), maxVal); maxVal = ::fmax(tex2D(eigTex, j + 1, i - 1), maxVal); maxVal = ::fmax(tex2D(eigTex, j - 1, i), maxVal); maxVal = ::fmax(tex2D(eigTex, j + 1, i), maxVal); maxVal = ::fmax(tex2D(eigTex, j - 1, i + 1), maxVal); maxVal = ::fmax(tex2D(eigTex, j , i + 1), maxVal); maxVal = ::fmax(tex2D(eigTex, j + 1, i + 1), maxVal); if (val == maxVal) { const uint ind = atomicInc(&g_counter, (uint)(-1)); if (ind < max_count) corners[ind] = make_float2(j, i); } } } } int findCorners_gpu(PtrStepSzf eig, float threshold, PtrStepSzb mask, float2* corners, int max_count) { void* counter_ptr; cudaSafeCall( hipGetSymbolAddress(&counter_ptr, g_counter) ); cudaSafeCall( hipMemset(counter_ptr, 0, sizeof(uint)) ); bindTexture(&eigTex, eig); dim3 block(32, 32); dim3 grid(divUp(eig.cols, block.x), divUp(eig.rows, block.y)); if (mask.data) hipLaunchKernelGGL(( findCorners), dim3(grid), dim3(block), 0, 0, threshold, SingleMask(mask), corners, max_count, eig.rows, eig.cols); else hipLaunchKernelGGL(( findCorners), dim3(grid), dim3(block), 0, 0, threshold, WithOutMask(), corners, max_count, eig.rows, eig.cols); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); uint count; cudaSafeCall( hipMemcpy(&count, counter_ptr, sizeof(uint), hipMemcpyDeviceToHost) ); return min(count, max_count); } class EigGreater { public: __device__ __forceinline__ bool operator()(float2 a, float2 b) const { return tex2D(eigTex, a.x, a.y) > tex2D(eigTex, b.x, b.y); } }; void sortCorners_gpu(PtrStepSzf eig, float2* corners, int count) { bindTexture(&eigTex, eig); thrust::device_ptr<float2> ptr(corners); thrust::sort(ptr, ptr + count, EigGreater()); }
282dea0f9bdcc15baa7c6e6b2c16a56f9ba2f651.cu
#include <thrust/device_ptr.h> #include <thrust/sort.h> //#include "cuda.h" //#include "cuda_runtime.h" #include "opencv2/gpu/device/common.hpp" #include "opencv2/gpu/device/utility.hpp" //#include "opencv2/core/cuda_devptrs.hpp" using namespace std; using namespace cv; using namespace cv::gpu; using namespace cv::gpu::device; texture<float, cudaTextureType2D, cudaReadModeElementType> eigTex(0, cudaFilterModePoint, cudaAddressModeClamp); __device__ uint g_counter = 0; template <class Mask> __global__ void findCorners(float threshold, const Mask mask, float2* corners, uint max_count, int rows, int cols) { const int j = blockIdx.x * blockDim.x + threadIdx.x; const int i = blockIdx.y * blockDim.y + threadIdx.y; if (i > 0 && i < rows - 1 && j > 0 && j < cols - 1 && mask(i, j)) { float val = tex2D(eigTex, j, i); if (val > threshold) { float maxVal = val; maxVal = ::fmax(tex2D(eigTex, j - 1, i - 1), maxVal); maxVal = ::fmax(tex2D(eigTex, j , i - 1), maxVal); maxVal = ::fmax(tex2D(eigTex, j + 1, i - 1), maxVal); maxVal = ::fmax(tex2D(eigTex, j - 1, i), maxVal); maxVal = ::fmax(tex2D(eigTex, j + 1, i), maxVal); maxVal = ::fmax(tex2D(eigTex, j - 1, i + 1), maxVal); maxVal = ::fmax(tex2D(eigTex, j , i + 1), maxVal); maxVal = ::fmax(tex2D(eigTex, j + 1, i + 1), maxVal); if (val == maxVal) { const uint ind = atomicInc(&g_counter, (uint)(-1)); if (ind < max_count) corners[ind] = make_float2(j, i); } } } } int findCorners_gpu(PtrStepSzf eig, float threshold, PtrStepSzb mask, float2* corners, int max_count) { void* counter_ptr; cudaSafeCall( cudaGetSymbolAddress(&counter_ptr, g_counter) ); cudaSafeCall( cudaMemset(counter_ptr, 0, sizeof(uint)) ); bindTexture(&eigTex, eig); dim3 block(32, 32); dim3 grid(divUp(eig.cols, block.x), divUp(eig.rows, block.y)); if (mask.data) findCorners<<<grid, block>>>(threshold, SingleMask(mask), corners, max_count, eig.rows, eig.cols); else findCorners<<<grid, block>>>(threshold, WithOutMask(), corners, max_count, eig.rows, eig.cols); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); uint count; cudaSafeCall( cudaMemcpy(&count, counter_ptr, sizeof(uint), cudaMemcpyDeviceToHost) ); return min(count, max_count); } class EigGreater { public: __device__ __forceinline__ bool operator()(float2 a, float2 b) const { return tex2D(eigTex, a.x, a.y) > tex2D(eigTex, b.x, b.y); } }; void sortCorners_gpu(PtrStepSzf eig, float2* corners, int count) { bindTexture(&eigTex, eig); thrust::device_ptr<float2> ptr(corners); thrust::sort(ptr, ptr + count, EigGreater()); }
ab740043e7c81b181c49310a55f05b27dc3309dc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "dmv.h" /* * Utility function to get the thread ID within the * global working space. */ __device__ int get_global_tid() { return (gridDim.x*blockIdx.y + blockIdx.x)*blockDim.x*blockDim.y + blockDim.x*threadIdx.y + threadIdx.x; } /* * Utility function to get the thread ID within the * local/block working space. */ __device__ int get_local_tid() { return blockDim.x*threadIdx.y + threadIdx.x; } /* * Naive kernel */ __global__ void dmv_gpu_naive(const value_t *a, const value_t *x, value_t *y, size_t n) { float result=0.0; const int row = get_global_tid(); int i; if(row < n) { #pragma unroll for(i=0; i<n; i++) result += a[row*n+i]*x[i]; y[row] = result; } } /* * Coalesced memory acceses */ __global__ void dmv_gpu_coalesced(const value_t *a, const value_t *x, value_t *y, size_t n) { float result=0.0; const int row = get_global_tid(); int i; if(row < n) { #pragma unroll for(i=0; i<n; i++) result += a[row+i*n]*x[i]; y[row] = result; } } /* * Use of shared memory */ __global__ void dmv_gpu_shmem(const value_t *a, const value_t *x, value_t *y, size_t n) { extern __shared__ float x_sh[]; int bl_x_ind; int bl_y_ind; int bl_l; float result = 0.f; register int i; bl_l = blockDim.x; bl_x_ind = blockIdx.x * bl_l; bl_y_ind = blockIdx.y * bl_l; x_sh[threadIdx.x] = x[bl_y_ind + threadIdx.x]; __syncthreads(); const int row = bl_x_ind + threadIdx.x; for(i=0; i<bl_l; i++) result += x_sh[i] * a[row+n*(bl_y_ind+i)]; atomicAdd(y + row , result); }
ab740043e7c81b181c49310a55f05b27dc3309dc.cu
#include <stdio.h> #include "dmv.h" /* * Utility function to get the thread ID within the * global working space. */ __device__ int get_global_tid() { return (gridDim.x*blockIdx.y + blockIdx.x)*blockDim.x*blockDim.y + blockDim.x*threadIdx.y + threadIdx.x; } /* * Utility function to get the thread ID within the * local/block working space. */ __device__ int get_local_tid() { return blockDim.x*threadIdx.y + threadIdx.x; } /* * Naive kernel */ __global__ void dmv_gpu_naive(const value_t *a, const value_t *x, value_t *y, size_t n) { float result=0.0; const int row = get_global_tid(); int i; if(row < n) { #pragma unroll for(i=0; i<n; i++) result += a[row*n+i]*x[i]; y[row] = result; } } /* * Coalesced memory acceses */ __global__ void dmv_gpu_coalesced(const value_t *a, const value_t *x, value_t *y, size_t n) { float result=0.0; const int row = get_global_tid(); int i; if(row < n) { #pragma unroll for(i=0; i<n; i++) result += a[row+i*n]*x[i]; y[row] = result; } } /* * Use of shared memory */ __global__ void dmv_gpu_shmem(const value_t *a, const value_t *x, value_t *y, size_t n) { extern __shared__ float x_sh[]; int bl_x_ind; int bl_y_ind; int bl_l; float result = 0.f; register int i; bl_l = blockDim.x; bl_x_ind = blockIdx.x * bl_l; bl_y_ind = blockIdx.y * bl_l; x_sh[threadIdx.x] = x[bl_y_ind + threadIdx.x]; __syncthreads(); const int row = bl_x_ind + threadIdx.x; for(i=0; i<bl_l; i++) result += x_sh[i] * a[row+n*(bl_y_ind+i)]; atomicAdd(y + row , result); }
289b688d86daa76f477e2522566bfba54ea814f1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "definitions.h" #include "kernel.h" #include <Windows.h> //////////////////////////////////////////////////////////////////////////////////////////////////// // GLOBAL VARIABLES //////////////////////////////////////////////////////////////////////////////////////////////////// // Device images (GPU) byte *dev_src_image; byte *dev_dst_image; // Information about the image to be processed int width; int height; int array_size; // Error management hipError_t error; // Threshold variables (also Border y Reverse) int num_threads_th; int num_blocks_th; int step_th; // Erode & Dilate variables (also Border) int num_block_x_ed; int num_block_y_ed; int num_threads_x_ed; int num_threads_y_ed; dim3 grid_dim_ed; dim3 block_dim_ed; //////////////////////////////////////////////////////////////////////////////////////////////////// // GPU UTILITY //////////////////////////////////////////////////////////////////////////////////////////////////// // Pointers swap void swap_buffers(byte **a, byte **b) { byte *aux = *a; *a = *b; *b = aux; } //////////////////////////////////////////////////////////////////////////////////////////////////// // THRESHOLD [GRAYSCALE TO BINARY] //////////////////////////////////////////////////////////////////////////////////////////////////// // Threshold kernel __global__ void threshold_kernel(byte *dev_src_image, byte *dev_dst_image, int min, int max, int array_size, int num_threads_th, int step_th) { // Calculate the position of the thread in the grid int pos = blockIdx.x * num_threads_th + threadIdx.x; // Do the operation for (; pos < array_size; pos += step_th) dev_dst_image[pos] = (dev_src_image[pos] >= min && dev_src_image[pos] <= max) ? 
1 : 0; } // Public call to Threshold kernel void _threshold(int min, int max, int _num_blocks_th, int _num_threads_th) { // If the user doesn't indicate blocks and threads, these are calculated automatically if (_num_threads_th == 0 && _num_blocks_th == 0) _auto_threshold_parameters(); else { num_threads_th = _num_threads_th; num_blocks_th = _num_blocks_th; step_th = _num_blocks_th * _num_threads_th; } // Threshold kernel call threshold_kernel << <num_blocks_th, num_threads_th >> > (dev_src_image, dev_dst_image, min, max, array_size, num_threads_th, step_th); swap_buffers(&dev_src_image, &dev_dst_image); hipDeviceSynchronize(); } //////////////////////////////////////////////////////////////////////////////////////////////////// // ERODE [BINARY] //////////////////////////////////////////////////////////////////////////////////////////////////// // Erode kernel __global__ void erode_kernel(byte *dev_src_image, byte *dev_dst_image, int height, int width, int radio) { // Calculate the position of the thread in the grid int posx = threadIdx.x + blockIdx.x * blockDim.x; int posy = threadIdx.y + blockIdx.y * blockDim.y; // Do the operation if (posx + (posy * width) <= (width * height)) { // Calculate the mask limit unsigned int start_i = max(posy - radio, 0); unsigned int end_i = min(height - 1, posy + radio); unsigned int start_j = max(posx - radio, 0); unsigned int end_j = min(width - 1, posx + radio); int _min = 1; // Write the minimum value for (int i = start_i; i <= end_i; i++) for (int j = start_j; j <= end_j; j++) _min = min(_min, dev_src_image[i*width + j]); dev_dst_image[posy * width + posx] = _min; } } // Public call to Erode kernel void _erode(int radio, int _num_block_x_ed, int _num_block_y_ed, int _num_threads_x_ed, int _num_threads_y_ed) { // If the user doesn't indicate blocks and threads, these are calculated automatically _set_erode_dilate_parameters(_num_block_x_ed, _num_block_y_ed, _num_threads_x_ed, _num_threads_y_ed); // Erode kernel call erode_kernel << <grid_dim_ed, block_dim_ed >> > (dev_src_image, dev_dst_image, height, width, radio); swap_buffers(&dev_src_image, &dev_dst_image); hipDeviceSynchronize(); } //////////////////////////////////////////////////////////////////////////////////////////////////// // DILATE [BINARY] //////////////////////////////////////////////////////////////////////////////////////////////////// // Dilate kernel __global__ void dilate_kernel(byte *dev_src_image, byte *dev_dst_image, int height, int width, int radio) { // Calculate the position of the thread in the grid int posx = threadIdx.x + blockIdx.x * blockDim.x; int posy = threadIdx.y + blockIdx.y * blockDim.y; // Do the operation if (posx + (posy * width) <= (width * height)) { // Calculate the mask limit unsigned int start_i = max(posy - radio, 0); unsigned int end_i = min(height - 1, posy + radio); unsigned int start_j = max(posx - radio, 0); unsigned int end_j = min(width - 1, posx + radio); int _max = 0; // Write the maximum value for (int i = start_i; i <= end_i; i++) for (int j = start_j; j <= end_j; j++) _max = max(_max, dev_src_image[i*width + j]); dev_dst_image[posy * width + posx] = _max; } } // Public call to Dilate kernel void _dilate(int radio, int _num_block_x_ed, int _num_block_y_ed, int _num_threads_x_ed, int _num_threads_y_ed) { // If the user doesn't indicate blocks and threads, these are calculated automatically _set_erode_dilate_parameters(_num_block_x_ed, _num_block_y_ed, _num_threads_x_ed, _num_threads_y_ed); // Dilate kernel call dilate_kernel << <grid_dim_ed, 
block_dim_ed >> > (dev_src_image, dev_dst_image, height, width, radio); swap_buffers(&dev_src_image, &dev_dst_image); hipDeviceSynchronize(); } //////////////////////////////////////////////////////////////////////////////////////////////////// // BORDER [BINARY] //////////////////////////////////////////////////////////////////////////////////////////////////// // Border kernel __global__ void border_kernel(byte *dev_src_image, byte *dev_dst_image, int array_size, int num_threads_th, int step_th) { // Calculate the position of the thread in the grid int pos = blockIdx.x * num_threads_th + threadIdx.x; // Do the operation for (; pos < array_size; pos += step_th) dev_dst_image[pos] = (dev_dst_image[pos] == dev_src_image[pos]) ? 0 : 1; } // Public call to Border kernel void _border(int radio, int _num_block_x_db, int _num_block_y_db, int _num_threads_x_db, int _num_threads_y_db, int _num_blocks_ab, int _num_threads_ab) { // If the user doesn't indicate blocks and threads, these are calculated automatically _set_erode_dilate_parameters(_num_block_x_db, _num_block_y_db, _num_threads_x_db, _num_threads_y_db); // Dilate kernel call dilate_kernel << <grid_dim_ed, block_dim_ed >> > (dev_src_image, dev_dst_image, height, width, radio); swap_buffers(&dev_src_image, &dev_dst_image); hipDeviceSynchronize(); // If the user doesn't indicate blocks and threads, these are calculated automatically if (_num_threads_ab == 0 && _num_blocks_ab == 0) _auto_threshold_parameters(); else { num_threads_th = _num_threads_ab; num_blocks_th = _num_blocks_ab; step_th = _num_blocks_ab * _num_threads_ab; } // Border kernel call border_kernel << <num_blocks_th, num_threads_th >> > (dev_src_image, dev_dst_image, array_size, num_threads_th, step_th); swap_buffers(&dev_src_image, &dev_dst_image); hipDeviceSynchronize(); } //////////////////////////////////////////////////////////////////////////////////////////////////// // REVERSE [BINARY] //////////////////////////////////////////////////////////////////////////////////////////////////// // Reverse kernel __global__ void reverse_kernel(byte *dev_src_image, byte *dev_dst_image, int array_size, int num_threads_th, int step_th) { // Calculate the position of the thread in the grid int pos = blockIdx.x * num_threads_th + threadIdx.x; // Do the operation for (; pos < array_size; pos += step_th) dev_dst_image[pos] = (dev_src_image[pos] == 0) ? 
1 : 0; } // Public call to Reverse kernel void _reverse(int _num_blocks_r, int _num_threads_r) { // If the user doesn't indicate blocks and threads, these are calculated automatically if (_num_threads_r == 0 && _num_blocks_r == 0) _auto_threshold_parameters(); else { num_threads_th = _num_threads_r; num_blocks_th = _num_blocks_r; step_th = _num_blocks_r * _num_threads_r; } // Reverse kernel call reverse_kernel << <num_blocks_th, num_threads_th >> > (dev_src_image, dev_dst_image, array_size, num_threads_th, step_th); swap_buffers(&dev_src_image, &dev_dst_image); hipDeviceSynchronize(); } //////////////////////////////////////////////////////////////////////////////////////////////////// // OPEN & CLOSE [BINARY] //////////////////////////////////////////////////////////////////////////////////////////////////// // Public call to Open void _open(int radio, int _num_block_x_ed, int _num_block_y_ed, int _num_threads_x_ed, int _num_threads_y_ed) { // If the user doesn't indicate blocks and threads, these are calculated automatically _set_erode_dilate_parameters(_num_block_x_ed, _num_block_y_ed, _num_threads_x_ed, _num_threads_y_ed); // Erode kernel call erode_kernel << <grid_dim_ed, block_dim_ed >> > (dev_src_image, dev_dst_image, height, width, radio); swap_buffers(&dev_src_image, &dev_dst_image); hipDeviceSynchronize(); // Dilate kernel call dilate_kernel << <grid_dim_ed, block_dim_ed >> > (dev_src_image, dev_dst_image, height, width, radio); swap_buffers(&dev_src_image, &dev_dst_image); hipDeviceSynchronize(); } // Public call to Open void _close(int radio, int _num_block_x_ed, int _num_block_y_ed, int _num_threads_x_ed, int _num_threads_y_ed) { // If the user doesn't indicate blocks and threads, these are calculated automatically _set_erode_dilate_parameters(_num_block_x_ed, _num_block_y_ed, _num_threads_x_ed, _num_threads_y_ed); // Dilate kernel call dilate_kernel << <grid_dim_ed, block_dim_ed >> > (dev_src_image, dev_dst_image, height, width, radio); swap_buffers(&dev_src_image, &dev_dst_image); hipDeviceSynchronize(); // Erode kernel call erode_kernel << <grid_dim_ed, block_dim_ed >> > (dev_src_image, dev_dst_image, height, width, radio); swap_buffers(&dev_src_image, &dev_dst_image); hipDeviceSynchronize(); } //////////////////////////////////////////////////////////////////////////////////////////////////// // CUDA MEMORY MANAGEMENT //////////////////////////////////////////////////////////////////////////////////////////////////// // Reserve GPU memory and copy image from CPU to GPU void _copy_img_to_gpu(byte *hst_src_image, int *error_code) { // Reserve src image memory on GPU error = hipMalloc(&dev_src_image, array_size); if (error != hipSuccess) { *error_code = CODE_ERROR_200; exit(EXIT_FAILURE); } // Reserve dst image memory on GPU error = hipMalloc(&dev_dst_image, array_size); if (error != hipSuccess) { *error_code = CODE_ERROR_201; exit(EXIT_FAILURE); } // Copy src image memory from CPU to GPU error = hipMemcpy(dev_src_image, hst_src_image, array_size, hipMemcpyHostToDevice); if (error != hipSuccess) { *error_code = CODE_ERROR_202; exit(EXIT_FAILURE); } hipDeviceSynchronize(); } // Copy image from GPU to CPU void _copy_img_to_cpu(byte *hst_dst_image, int *error_code) { hipDeviceSynchronize(); // Copy src image memory from GPU to CPU error = hipMemcpy(hst_dst_image, dev_src_image, array_size, hipMemcpyDeviceToHost); if (error != hipSuccess) { *error_code = CODE_ERROR_203; exit(EXIT_FAILURE); } hipDeviceSynchronize(); } // Free cuda memory void _cuda_free(int *error_code) { // Free 
src image memory error = hipFree(dev_src_image); if (error != hipSuccess) { *error_code = CODE_ERROR_204; exit(EXIT_FAILURE); } //Free dst image memory error = hipFree(dev_dst_image); if (error != hipSuccess) { *error_code = CODE_ERROR_205; exit(EXIT_FAILURE); } } //////////////////////////////////////////////////////////////////////////////////////////////////// // PARAMETRIZACIN ALGORITMIA E IMGENES //////////////////////////////////////////////////////////////////////////////////////////////////// // Set image to be processed parameters void _set_img_info(int _width, int _height) { width = _width; height = _height; array_size = _width * _height; } // Automatic block and thread assignment if the user does't indicate anything void _set_erode_dilate_parameters(int _num_block_x_ed, int _num_block_y_ed, int _num_threads_x_ed, int _num_threads_y_ed) { if (_num_block_x_ed == 0 && _num_block_y_ed == 0 && _num_threads_x_ed == 0 && _num_threads_y_ed == 0) { // Divide by multiple of 2. 16 is good option num_block_x_ed = width / 16; if (num_block_x_ed * 16 < width) num_block_x_ed++; num_block_y_ed = height / 16; if (num_block_y_ed * 16 < height) num_block_y_ed++; num_threads_x_ed = 16; num_threads_y_ed = 16; grid_dim_ed.x = num_block_x_ed; grid_dim_ed.y = num_block_y_ed; block_dim_ed.x = num_threads_x_ed; block_dim_ed.y = num_threads_y_ed; } else { num_block_x_ed = _num_block_x_ed; num_block_y_ed = _num_block_y_ed; num_threads_x_ed = _num_threads_x_ed; num_threads_y_ed = _num_threads_y_ed; grid_dim_ed.x = _num_block_x_ed; grid_dim_ed.y = _num_block_y_ed; block_dim_ed.x = _num_threads_x_ed; block_dim_ed.y = _num_threads_y_ed; } } // Automatic block and thread assignment if the user does't indicate anything void _auto_threshold_parameters() { num_threads_th = 1024; // It's the maximum threads per block num_blocks_th = array_size / num_threads_th; if (num_blocks_th * num_threads_th < array_size) // Add one more block if odd num_blocks_th++; step_th = num_blocks_th * num_threads_th; }
289b688d86daa76f477e2522566bfba54ea814f1.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "definitions.h" #include "kernel.h" #include <Windows.h> //////////////////////////////////////////////////////////////////////////////////////////////////// // GLOBAL VARIABLES //////////////////////////////////////////////////////////////////////////////////////////////////// // Device images (GPU) byte *dev_src_image; byte *dev_dst_image; // Information about the image to be processed int width; int height; int array_size; // Error management cudaError_t error; // Threshold variables (also Border y Reverse) int num_threads_th; int num_blocks_th; int step_th; // Erode & Dilate variables (also Border) int num_block_x_ed; int num_block_y_ed; int num_threads_x_ed; int num_threads_y_ed; dim3 grid_dim_ed; dim3 block_dim_ed; //////////////////////////////////////////////////////////////////////////////////////////////////// // GPU UTILITY //////////////////////////////////////////////////////////////////////////////////////////////////// // Pointers swap void swap_buffers(byte **a, byte **b) { byte *aux = *a; *a = *b; *b = aux; } //////////////////////////////////////////////////////////////////////////////////////////////////// // THRESHOLD [GRAYSCALE TO BINARY] //////////////////////////////////////////////////////////////////////////////////////////////////// // Threshold kernel __global__ void threshold_kernel(byte *dev_src_image, byte *dev_dst_image, int min, int max, int array_size, int num_threads_th, int step_th) { // Calculate the position of the thread in the grid int pos = blockIdx.x * num_threads_th + threadIdx.x; // Do the operation for (; pos < array_size; pos += step_th) dev_dst_image[pos] = (dev_src_image[pos] >= min && dev_src_image[pos] <= max) ? 1 : 0; } // Public call to Threshold kernel void _threshold(int min, int max, int _num_blocks_th, int _num_threads_th) { // If the user doesn't indicate blocks and threads, these are calculated automatically if (_num_threads_th == 0 && _num_blocks_th == 0) _auto_threshold_parameters(); else { num_threads_th = _num_threads_th; num_blocks_th = _num_blocks_th; step_th = _num_blocks_th * _num_threads_th; } // Threshold kernel call threshold_kernel << <num_blocks_th, num_threads_th >> > (dev_src_image, dev_dst_image, min, max, array_size, num_threads_th, step_th); swap_buffers(&dev_src_image, &dev_dst_image); cudaDeviceSynchronize(); } //////////////////////////////////////////////////////////////////////////////////////////////////// // ERODE [BINARY] //////////////////////////////////////////////////////////////////////////////////////////////////// // Erode kernel __global__ void erode_kernel(byte *dev_src_image, byte *dev_dst_image, int height, int width, int radio) { // Calculate the position of the thread in the grid int posx = threadIdx.x + blockIdx.x * blockDim.x; int posy = threadIdx.y + blockIdx.y * blockDim.y; // Do the operation if (posx + (posy * width) <= (width * height)) { // Calculate the mask limit unsigned int start_i = max(posy - radio, 0); unsigned int end_i = min(height - 1, posy + radio); unsigned int start_j = max(posx - radio, 0); unsigned int end_j = min(width - 1, posx + radio); int _min = 1; // Write the minimum value for (int i = start_i; i <= end_i; i++) for (int j = start_j; j <= end_j; j++) _min = min(_min, dev_src_image[i*width + j]); dev_dst_image[posy * width + posx] = _min; } } // Public call to Erode kernel void _erode(int radio, int _num_block_x_ed, int _num_block_y_ed, int _num_threads_x_ed, int _num_threads_y_ed) { // If the 
user doesn't indicate blocks and threads, these are calculated automatically _set_erode_dilate_parameters(_num_block_x_ed, _num_block_y_ed, _num_threads_x_ed, _num_threads_y_ed); // Erode kernel call erode_kernel << <grid_dim_ed, block_dim_ed >> > (dev_src_image, dev_dst_image, height, width, radio); swap_buffers(&dev_src_image, &dev_dst_image); cudaDeviceSynchronize(); } //////////////////////////////////////////////////////////////////////////////////////////////////// // DILATE [BINARY] //////////////////////////////////////////////////////////////////////////////////////////////////// // Dilate kernel __global__ void dilate_kernel(byte *dev_src_image, byte *dev_dst_image, int height, int width, int radio) { // Calculate the position of the thread in the grid int posx = threadIdx.x + blockIdx.x * blockDim.x; int posy = threadIdx.y + blockIdx.y * blockDim.y; // Do the operation if (posx + (posy * width) <= (width * height)) { // Calculate the mask limit unsigned int start_i = max(posy - radio, 0); unsigned int end_i = min(height - 1, posy + radio); unsigned int start_j = max(posx - radio, 0); unsigned int end_j = min(width - 1, posx + radio); int _max = 0; // Write the maximum value for (int i = start_i; i <= end_i; i++) for (int j = start_j; j <= end_j; j++) _max = max(_max, dev_src_image[i*width + j]); dev_dst_image[posy * width + posx] = _max; } } // Public call to Dilate kernel void _dilate(int radio, int _num_block_x_ed, int _num_block_y_ed, int _num_threads_x_ed, int _num_threads_y_ed) { // If the user doesn't indicate blocks and threads, these are calculated automatically _set_erode_dilate_parameters(_num_block_x_ed, _num_block_y_ed, _num_threads_x_ed, _num_threads_y_ed); // Dilate kernel call dilate_kernel << <grid_dim_ed, block_dim_ed >> > (dev_src_image, dev_dst_image, height, width, radio); swap_buffers(&dev_src_image, &dev_dst_image); cudaDeviceSynchronize(); } //////////////////////////////////////////////////////////////////////////////////////////////////// // BORDER [BINARY] //////////////////////////////////////////////////////////////////////////////////////////////////// // Border kernel __global__ void border_kernel(byte *dev_src_image, byte *dev_dst_image, int array_size, int num_threads_th, int step_th) { // Calculate the position of the thread in the grid int pos = blockIdx.x * num_threads_th + threadIdx.x; // Do the operation for (; pos < array_size; pos += step_th) dev_dst_image[pos] = (dev_dst_image[pos] == dev_src_image[pos]) ? 
0 : 1; } // Public call to Border kernel void _border(int radio, int _num_block_x_db, int _num_block_y_db, int _num_threads_x_db, int _num_threads_y_db, int _num_blocks_ab, int _num_threads_ab) { // If the user doesn't indicate blocks and threads, these are calculated automatically _set_erode_dilate_parameters(_num_block_x_db, _num_block_y_db, _num_threads_x_db, _num_threads_y_db); // Dilate kernel call dilate_kernel << <grid_dim_ed, block_dim_ed >> > (dev_src_image, dev_dst_image, height, width, radio); swap_buffers(&dev_src_image, &dev_dst_image); cudaDeviceSynchronize(); // If the user doesn't indicate blocks and threads, these are calculated automatically if (_num_threads_ab == 0 && _num_blocks_ab == 0) _auto_threshold_parameters(); else { num_threads_th = _num_threads_ab; num_blocks_th = _num_blocks_ab; step_th = _num_blocks_ab * _num_threads_ab; } // Border kernel call border_kernel << <num_blocks_th, num_threads_th >> > (dev_src_image, dev_dst_image, array_size, num_threads_th, step_th); swap_buffers(&dev_src_image, &dev_dst_image); cudaDeviceSynchronize(); } //////////////////////////////////////////////////////////////////////////////////////////////////// // REVERSE [BINARY] //////////////////////////////////////////////////////////////////////////////////////////////////// // Reverse kernel __global__ void reverse_kernel(byte *dev_src_image, byte *dev_dst_image, int array_size, int num_threads_th, int step_th) { // Calculate the position of the thread in the grid int pos = blockIdx.x * num_threads_th + threadIdx.x; // Do the operation for (; pos < array_size; pos += step_th) dev_dst_image[pos] = (dev_src_image[pos] == 0) ? 1 : 0; } // Public call to Reverse kernel void _reverse(int _num_blocks_r, int _num_threads_r) { // If the user doesn't indicate blocks and threads, these are calculated automatically if (_num_threads_r == 0 && _num_blocks_r == 0) _auto_threshold_parameters(); else { num_threads_th = _num_threads_r; num_blocks_th = _num_blocks_r; step_th = _num_blocks_r * _num_threads_r; } // Reverse kernel call reverse_kernel << <num_blocks_th, num_threads_th >> > (dev_src_image, dev_dst_image, array_size, num_threads_th, step_th); swap_buffers(&dev_src_image, &dev_dst_image); cudaDeviceSynchronize(); } //////////////////////////////////////////////////////////////////////////////////////////////////// // OPEN & CLOSE [BINARY] //////////////////////////////////////////////////////////////////////////////////////////////////// // Public call to Open void _open(int radio, int _num_block_x_ed, int _num_block_y_ed, int _num_threads_x_ed, int _num_threads_y_ed) { // If the user doesn't indicate blocks and threads, these are calculated automatically _set_erode_dilate_parameters(_num_block_x_ed, _num_block_y_ed, _num_threads_x_ed, _num_threads_y_ed); // Erode kernel call erode_kernel << <grid_dim_ed, block_dim_ed >> > (dev_src_image, dev_dst_image, height, width, radio); swap_buffers(&dev_src_image, &dev_dst_image); cudaDeviceSynchronize(); // Dilate kernel call dilate_kernel << <grid_dim_ed, block_dim_ed >> > (dev_src_image, dev_dst_image, height, width, radio); swap_buffers(&dev_src_image, &dev_dst_image); cudaDeviceSynchronize(); } // Public call to Open void _close(int radio, int _num_block_x_ed, int _num_block_y_ed, int _num_threads_x_ed, int _num_threads_y_ed) { // If the user doesn't indicate blocks and threads, these are calculated automatically _set_erode_dilate_parameters(_num_block_x_ed, _num_block_y_ed, _num_threads_x_ed, _num_threads_y_ed); // Dilate kernel call 
dilate_kernel << <grid_dim_ed, block_dim_ed >> > (dev_src_image, dev_dst_image, height, width, radio); swap_buffers(&dev_src_image, &dev_dst_image); cudaDeviceSynchronize(); // Erode kernel call erode_kernel << <grid_dim_ed, block_dim_ed >> > (dev_src_image, dev_dst_image, height, width, radio); swap_buffers(&dev_src_image, &dev_dst_image); cudaDeviceSynchronize(); } //////////////////////////////////////////////////////////////////////////////////////////////////// // CUDA MEMORY MANAGEMENT //////////////////////////////////////////////////////////////////////////////////////////////////// // Reserve GPU memory and copy image from CPU to GPU void _copy_img_to_gpu(byte *hst_src_image, int *error_code) { // Reserve src image memory on GPU error = cudaMalloc(&dev_src_image, array_size); if (error != cudaSuccess) { *error_code = CODE_ERROR_200; exit(EXIT_FAILURE); } // Reserve dst image memory on GPU error = cudaMalloc(&dev_dst_image, array_size); if (error != cudaSuccess) { *error_code = CODE_ERROR_201; exit(EXIT_FAILURE); } // Copy src image memory from CPU to GPU error = cudaMemcpy(dev_src_image, hst_src_image, array_size, cudaMemcpyHostToDevice); if (error != cudaSuccess) { *error_code = CODE_ERROR_202; exit(EXIT_FAILURE); } cudaDeviceSynchronize(); } // Copy image from GPU to CPU void _copy_img_to_cpu(byte *hst_dst_image, int *error_code) { cudaDeviceSynchronize(); // Copy src image memory from GPU to CPU error = cudaMemcpy(hst_dst_image, dev_src_image, array_size, cudaMemcpyDeviceToHost); if (error != cudaSuccess) { *error_code = CODE_ERROR_203; exit(EXIT_FAILURE); } cudaDeviceSynchronize(); } // Free cuda memory void _cuda_free(int *error_code) { // Free src image memory error = cudaFree(dev_src_image); if (error != cudaSuccess) { *error_code = CODE_ERROR_204; exit(EXIT_FAILURE); } //Free dst image memory error = cudaFree(dev_dst_image); if (error != cudaSuccess) { *error_code = CODE_ERROR_205; exit(EXIT_FAILURE); } } //////////////////////////////////////////////////////////////////////////////////////////////////// // PARAMETRIZACIÓN ALGORITMIA E IMÁGENES //////////////////////////////////////////////////////////////////////////////////////////////////// // Set image to be processed parameters void _set_img_info(int _width, int _height) { width = _width; height = _height; array_size = _width * _height; } // Automatic block and thread assignment if the user does't indicate anything void _set_erode_dilate_parameters(int _num_block_x_ed, int _num_block_y_ed, int _num_threads_x_ed, int _num_threads_y_ed) { if (_num_block_x_ed == 0 && _num_block_y_ed == 0 && _num_threads_x_ed == 0 && _num_threads_y_ed == 0) { // Divide by multiple of 2. 
16 is good option num_block_x_ed = width / 16; if (num_block_x_ed * 16 < width) num_block_x_ed++; num_block_y_ed = height / 16; if (num_block_y_ed * 16 < height) num_block_y_ed++; num_threads_x_ed = 16; num_threads_y_ed = 16; grid_dim_ed.x = num_block_x_ed; grid_dim_ed.y = num_block_y_ed; block_dim_ed.x = num_threads_x_ed; block_dim_ed.y = num_threads_y_ed; } else { num_block_x_ed = _num_block_x_ed; num_block_y_ed = _num_block_y_ed; num_threads_x_ed = _num_threads_x_ed; num_threads_y_ed = _num_threads_y_ed; grid_dim_ed.x = _num_block_x_ed; grid_dim_ed.y = _num_block_y_ed; block_dim_ed.x = _num_threads_x_ed; block_dim_ed.y = _num_threads_y_ed; } } // Automatic block and thread assignment if the user does't indicate anything void _auto_threshold_parameters() { num_threads_th = 1024; // It's the maximum threads per block num_blocks_th = array_size / num_threads_th; if (num_blocks_th * num_threads_th < array_size) // Add one more block if odd num_blocks_th++; step_th = num_blocks_th * num_threads_th; }
571f2669994fb815a15efbbb446f0741c2d4b977.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** * Copyright 2018 The Apollo Authors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *****************************************************************************/ #include <vector> #include "modules/perception/inference/tensorrt/plugins/slice_plugin.h" namespace apollo { namespace perception { namespace inference { typedef int8_t int8; template<typename Dtype> __global__ void Slice(const int nthreads, const Dtype *in_data, const int num_slices, const int slice_size, const int bottom_slice_axis, const int top_slice_axis, const int offset_slice_axis, Dtype *out_data) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < nthreads) { const int total_slice_size = slice_size * top_slice_axis; const int slice_num = index / total_slice_size; const int slice_index = index % total_slice_size; const int bottom_index = slice_index + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size; out_data[index] = in_data[bottom_index]; } } int SLICEPlugin::enqueue(int batchSize, const void *const *inputs, void **outputs, void *workspace, hipStream_t stream) { int slice_size = 1; for (size_t index = axis_ + 1; index < input_dims_.nbDims; index++) { slice_size *= input_dims_.d[index]; } int num_slices = batchSize; for (size_t index = 0; index < axis_; index++) { num_slices *= input_dims_.d[index]; } int offset_slice_axis = 0; for (int i = 0; i < out_slice_dims_.size(); i++) { const int top_slice_axis = out_slice_dims_[i]; const int top_slice_size = top_slice_axis * slice_size; const int nthreads = top_slice_size * num_slices; const int block_num = (nthreads + 511) / 512; Slice // NOLINT_NEXT_LINE(whitespace/operators) << < block_num, 512, 0, stream >> > ( nthreads, (const float *) (inputs[0]), num_slices, slice_size, input_dims_.d[axis_], top_slice_axis, offset_slice_axis, reinterpret_cast<float *>(outputs[i])); offset_slice_axis += top_slice_axis; } return 1; } } // namespace inference } // namespace perception } // namespace apollo
571f2669994fb815a15efbbb446f0741c2d4b977.cu
/****************************************************************************** * Copyright 2018 The Apollo Authors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *****************************************************************************/ #include <vector> #include "modules/perception/inference/tensorrt/plugins/slice_plugin.h" namespace apollo { namespace perception { namespace inference { typedef int8_t int8; template<typename Dtype> __global__ void Slice(const int nthreads, const Dtype *in_data, const int num_slices, const int slice_size, const int bottom_slice_axis, const int top_slice_axis, const int offset_slice_axis, Dtype *out_data) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < nthreads) { const int total_slice_size = slice_size * top_slice_axis; const int slice_num = index / total_slice_size; const int slice_index = index % total_slice_size; const int bottom_index = slice_index + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size; out_data[index] = in_data[bottom_index]; } } int SLICEPlugin::enqueue(int batchSize, const void *const *inputs, void **outputs, void *workspace, cudaStream_t stream) { int slice_size = 1; for (size_t index = axis_ + 1; index < input_dims_.nbDims; index++) { slice_size *= input_dims_.d[index]; } int num_slices = batchSize; for (size_t index = 0; index < axis_; index++) { num_slices *= input_dims_.d[index]; } int offset_slice_axis = 0; for (int i = 0; i < out_slice_dims_.size(); i++) { const int top_slice_axis = out_slice_dims_[i]; const int top_slice_size = top_slice_axis * slice_size; const int nthreads = top_slice_size * num_slices; const int block_num = (nthreads + 511) / 512; Slice // NOLINT_NEXT_LINE(whitespace/operators) << < block_num, 512, 0, stream >> > ( nthreads, (const float *) (inputs[0]), num_slices, slice_size, input_dims_.d[axis_], top_slice_axis, offset_slice_axis, reinterpret_cast<float *>(outputs[i])); offset_slice_axis += top_slice_axis; } return 1; } } // namespace inference } // namespace perception } // namespace apollo
81be3fc0a59d564e21adccbcbc48b6f1505a6406.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "star2d1r-512-13-256_kernel.hu" __device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; } __global__ void kernel0_13(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 13; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 486; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_8_0; float __reg_8_1; float __reg_8_2; float __reg_9_0; float __reg_9_1; float __reg_9_2; float __reg_10_0; float __reg_10_1; float __reg_10_2; float __reg_11_0; float __reg_11_1; float __reg_11_2; float __reg_12_0; float __reg_12_1; float __reg_12_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 
< __side2LenOl - (__halo2 * 10); const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11); const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12); const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13); const AN5D_TYPE __storeValid = __writeValid13; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC12(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid12) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_12_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_12_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); 
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_12_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_12_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_12_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_12_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_12_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_12_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_12_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_12_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_12_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); 
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_12_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_12_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(1, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(2, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(3, __reg_12_2, __reg_12_0, __reg_12_1); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); 
__CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(4, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(5, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(6, __reg_12_2, __reg_12_0, __reg_12_1); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(7, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(8, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); 
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(9, __reg_12_2, __reg_12_0, __reg_12_1); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(10, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(11, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(12, __reg_12_2, __reg_12_0, __reg_12_1); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(13, __reg_12_0, __reg_12_1, __reg_12_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, 
__reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, 
__reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, 
__reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(13, __reg_12_0, __reg_12_1, __reg_12_2); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 27; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, 
__reg_11_1, __reg_11_2); __STORE(__h - 13, __reg_12_2, __reg_12_0, __reg_12_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 13, __reg_12_0, __reg_12_1, __reg_12_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 12, __reg_12_2, __reg_12_0, __reg_12_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 11, __reg_12_0, __reg_12_1, __reg_12_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 10, __reg_12_1, __reg_12_2, __reg_12_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); 
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 9, __reg_12_2, __reg_12_0, __reg_12_1); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 8, __reg_12_0, __reg_12_1, __reg_12_2); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 7, __reg_12_1, __reg_12_2, __reg_12_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 6, __reg_12_2, __reg_12_0, __reg_12_1); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 5, __reg_12_0, __reg_12_1, __reg_12_2); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 4, __reg_12_1, __reg_12_2, __reg_12_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_0_2); __STORE(__h - 3, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 2, __reg_12_0, __reg_12_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 12, __reg_12_2, __reg_12_0, __reg_12_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); 
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 11, __reg_12_0, __reg_12_1, __reg_12_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 10, __reg_12_1, __reg_12_2, __reg_12_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 9, __reg_12_2, __reg_12_0, __reg_12_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 8, __reg_12_0, __reg_12_1, __reg_12_2); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 7, __reg_12_1, __reg_12_2, __reg_12_0); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 6, __reg_12_2, __reg_12_0, __reg_12_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 5, __reg_12_0, __reg_12_1, __reg_12_2); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 4, __reg_12_1, __reg_12_2, __reg_12_0); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 3, __reg_12_2, __reg_12_0, __reg_12_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_0_0); __STORE(__h - 2, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 1, __reg_12_1, __reg_12_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); 
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 12, __reg_12_2, __reg_12_0, __reg_12_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 11, __reg_12_0, __reg_12_1, __reg_12_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 10, __reg_12_1, __reg_12_2, __reg_12_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 9, __reg_12_2, __reg_12_0, __reg_12_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 8, __reg_12_0, __reg_12_1, __reg_12_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); 
__CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 7, __reg_12_1, __reg_12_2, __reg_12_0); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 6, __reg_12_2, __reg_12_0, __reg_12_1); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 5, __reg_12_0, __reg_12_1, __reg_12_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 4, __reg_12_1, __reg_12_2, __reg_12_0); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 3, __reg_12_2, __reg_12_0, __reg_12_1); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 2, __reg_12_0, __reg_12_1, __reg_12_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_0_1); __STORE(__h - 1, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h + 0, __reg_12_2, __reg_12_0, __reg_0_1); } } else { for (__h = 27; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 13, __reg_12_2, __reg_12_0, __reg_12_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, 
__reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 13, __reg_12_0, __reg_12_1, __reg_12_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 13, __reg_12_2, __reg_12_0, __reg_12_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 13, __reg_12_0, __reg_12_1, __reg_12_2); __h++; } }
__global__ void kernel0_12(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 12; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 488; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_8_0; float __reg_8_1; float __reg_8_2; float __reg_9_0; float __reg_9_1; float __reg_9_2; float __reg_10_0; float __reg_10_1; float __reg_10_2; float __reg_11_0; float __reg_11_1; float __reg_11_2;
__shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11); const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12); const AN5D_TYPE __storeValid = __writeValid12;
AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0) { __LOAD(__reg_11_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_11_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_11_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_11_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_11_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_11_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_11_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_11_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_11_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_11_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_11_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_11_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(1, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); 
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(2, __reg_11_1, __reg_11_2, __reg_11_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(3, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(4, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(5, __reg_11_1, __reg_11_2, __reg_11_0); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(6, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(7, __reg_11_0, 
__reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(8, __reg_11_1, __reg_11_2, __reg_11_0); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(9, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(10, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(11, __reg_11_1, __reg_11_2, __reg_11_0); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(12, __reg_11_2, __reg_11_0, __reg_11_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 
4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, 
__reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, 
__reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(12, __reg_11_2, __reg_11_0, __reg_11_1); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 12, __reg_11_1, __reg_11_2, __reg_11_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 12, __reg_11_2, __reg_11_0, __reg_11_1); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, 
__reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 11, __reg_11_1, __reg_11_2, __reg_11_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 10, __reg_11_2, __reg_11_0, __reg_11_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 9, __reg_11_0, __reg_11_1, __reg_11_2); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 8, __reg_11_1, __reg_11_2, __reg_11_0); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 7, __reg_11_2, __reg_11_0, __reg_11_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 6, __reg_11_0, __reg_11_1, __reg_11_2); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 5, __reg_11_1, __reg_11_2, __reg_11_0); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 4, __reg_11_2, __reg_11_0, __reg_11_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0); __STORE(__h - 3, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 2, __reg_11_1, __reg_11_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); 
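/* NOTE: this else-if ladder is the pipeline drain at the bottom edge of the last row tile.
   Each `__h + k == __c1Len - ...` case (k = 0, 1, 2) unrolls the remaining stages for the k rows
   still unread, reusing the last loaded register (__reg_0_*) as the below-neighbour so nothing
   past the tile boundary is loaded. */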
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 11, __reg_11_1, __reg_11_2, __reg_11_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 10, __reg_11_2, __reg_11_0, __reg_11_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 9, __reg_11_0, __reg_11_1, __reg_11_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 8, __reg_11_1, __reg_11_2, __reg_11_0); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 7, __reg_11_2, __reg_11_0, __reg_11_1); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 6, __reg_11_0, __reg_11_1, __reg_11_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 5, __reg_11_1, __reg_11_2, __reg_11_0); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, 
__reg_10_1, __reg_10_2); __STORE(__h - 4, __reg_11_2, __reg_11_0, __reg_11_1); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 3, __reg_11_0, __reg_11_1, __reg_11_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1); __STORE(__h - 2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 1, __reg_11_2, __reg_11_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 11, __reg_11_1, __reg_11_2, __reg_11_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 10, __reg_11_2, __reg_11_0, __reg_11_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 9, __reg_11_0, __reg_11_1, __reg_11_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 8, __reg_11_1, __reg_11_2, __reg_11_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); 
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 7, __reg_11_2, __reg_11_0, __reg_11_1); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 6, __reg_11_0, __reg_11_1, __reg_11_2); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 5, __reg_11_1, __reg_11_2, __reg_11_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 4, __reg_11_2, __reg_11_0, __reg_11_1); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 3, __reg_11_0, __reg_11_1, __reg_11_2); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2); __STORE(__h - 1, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h + 0, __reg_11_0, __reg_11_1, __reg_0_2); } } else { for (__h = 25; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 12, __reg_11_1, __reg_11_2, __reg_11_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); 
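/* NOTE: interior row tiles take this branch: the steady-state loop consumes three input rows per
   iteration, rotating each stage's registers, and writes results 12 rows behind the load front
   (__STORE(__h - 12, ...)). The guarded steps after the loop finish the last zero to two rows of
   the overlapped tile (__side1LenOl). */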
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 12, __reg_11_2, __reg_11_0, __reg_11_1); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 12, __reg_11_1, __reg_11_2, __reg_11_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 12, __reg_11_2, __reg_11_0, __reg_11_1); __h++; } } __global__ void kernel0_11(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 11; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 490; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; 
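/* NOTE: kernel0_11 applies the same register-pipelined stencil with 11 fused time steps
   (__side0Len = 11). A thread block produces a 256 x 490 tile (__side1Len x __side2Len); the
   11-element overlap on each side (__OlLen1/__OlLen2) is loaded and computed but only stored when
   __storeValid holds, so adjacent tiles stay independent. */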
const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_8_0; float __reg_8_1; float __reg_8_2; float __reg_9_0; float __reg_9_1; float __reg_9_2; float __reg_10_0; float __reg_10_1; float __reg_10_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11); const AN5D_TYPE __storeValid = __writeValid11; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_10_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_10_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_10_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_10_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_10_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_10_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_10_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); 
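/* NOTE: __c1Id == 0 is the tile touching the top boundary. Row 0 is kept in __reg_10_0 and used as
   the upper neighbour the first time each deeper stage fires, so the pipeline fill uses the
   untouched boundary row instead of rows above the grid. */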
__CALC7(__reg_7_1, __reg_10_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_10_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_10_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_10_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(1, __reg_10_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(2, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(3, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); 
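/* NOTE: register naming is __reg_<stage>_<phase>: each stage keeps three rows in flight, and every
   new input row advances the phase, which is why consecutive calls to the same __CALCn cycle their
   operands through (_0,_1,_2) -> (_1,_2,_0) -> (_2,_0,_1). */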
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(4, __reg_10_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(5, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(6, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(7, __reg_10_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(8, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(9, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, 
__reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(10, __reg_10_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(11, __reg_10_1, __reg_10_2, __reg_10_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, 
__reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, 
__reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(11, __reg_10_1, __reg_10_2, __reg_10_0); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 23; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 11, __reg_10_0, __reg_10_1, __reg_10_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 11, __reg_10_1, __reg_10_2, __reg_10_0); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, 
__reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 10, __reg_10_0, __reg_10_1, __reg_10_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 9, __reg_10_1, __reg_10_2, __reg_10_0); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 8, __reg_10_2, __reg_10_0, __reg_10_1); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 7, __reg_10_0, __reg_10_1, __reg_10_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 6, __reg_10_1, __reg_10_2, __reg_10_0); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 5, __reg_10_2, __reg_10_0, __reg_10_1); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 4, __reg_10_0, __reg_10_1, __reg_10_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1); __STORE(__h - 3, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 2, __reg_10_2, __reg_10_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, 
__reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 10, __reg_10_0, __reg_10_1, __reg_10_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 9, __reg_10_1, __reg_10_2, __reg_10_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 8, __reg_10_2, __reg_10_0, __reg_10_1); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 7, __reg_10_0, __reg_10_1, __reg_10_2); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 6, __reg_10_1, __reg_10_2, __reg_10_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 5, __reg_10_2, __reg_10_0, __reg_10_1); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 4, __reg_10_0, __reg_10_1, __reg_10_2); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 3, __reg_10_1, __reg_10_2, __reg_10_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2); __STORE(__h - 2, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 1, __reg_10_0, __reg_10_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, 
__reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 10, __reg_10_0, __reg_10_1, __reg_10_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 9, __reg_10_1, __reg_10_2, __reg_10_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 8, __reg_10_2, __reg_10_0, __reg_10_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 7, __reg_10_0, __reg_10_1, __reg_10_2); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 6, __reg_10_1, __reg_10_2, __reg_10_0); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 5, __reg_10_2, __reg_10_0, __reg_10_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 4, __reg_10_0, __reg_10_1, __reg_10_2); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 3, __reg_10_1, __reg_10_2, __reg_10_0); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 2, __reg_10_2, __reg_10_0, __reg_10_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0); __STORE(__h - 1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h + 0, __reg_10_1, __reg_10_2, __reg_0_0); } } else { for (__h = 23; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, 
__reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 11, __reg_10_0, __reg_10_1, __reg_10_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 11, __reg_10_1, __reg_10_2, __reg_10_0); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 11, __reg_10_0, __reg_10_1, __reg_10_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 11, __reg_10_1, 
__reg_10_2, __reg_10_0); __h++; } } __global__ void kernel0_10(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 10; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 492; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_8_0; float __reg_8_1; float __reg_8_2; float __reg_9_0; float __reg_9_1; float __reg_9_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __storeValid = __writeValid10; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST 
(A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_9_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_9_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_9_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_9_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_9_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_9_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_9_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, 
__reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_9_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_9_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_9_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(3, __reg_9_2, __reg_9_0, __reg_9_1); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(4, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); 
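// kernel0_10 fuses 10 time steps of the 5-point stencil (see __CALCEXPR) per sweep: __CALC1..__CALC9 hold nine intermediate time levels in registers and __STORE applies the tenth step to the output plane. In this top-tile branch (__c1Id == 0) the prologue loads rows 0..20 and emits output rows 1..10.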
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(5, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(6, __reg_9_2, __reg_9_0, __reg_9_1); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(7, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(8, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(9, __reg_9_2, __reg_9_0, __reg_9_1); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(10, __reg_9_0, __reg_9_1, __reg_9_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); 
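// Tiles below the top one (__c1Id != 0) run the same prologue over the 10 halo rows above the tile purely to warm up the register pipeline; the first row actually written is __STORE(10, ...) at the end of this prologue.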
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, 
__reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(10, __reg_9_0, __reg_9_1, __reg_9_2); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, 
__reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, 
__reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, 
__reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h + 0, __reg_9_2, __reg_9_0, __reg_0_1); } } else { for (__h = 21; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); 
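// Streaming loop for tiles that do not touch the bottom boundary: each iteration loads three new input rows, rotates them through the __reg_*_0/1/2 register triples, and stores output rows __h - 10; the guarded single steps after the loop handle the last few rows before __side1LenOl.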
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2); __h++; } } __global__ void kernel0_9(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 9; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 494; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float 
__reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_8_0; float __reg_8_1; float __reg_8_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __storeValid = __writeValid9; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_8_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_8_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_8_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_8_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_8_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_8_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_8_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_8_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); 
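// kernel0_9 is the 9-step variant of the same stencil (__side0Len = 9, 494 interior columns per block): __CALC1..__CALC8 keep eight intermediate time levels in registers and __STORE applies the ninth; this top-tile prologue emits output rows 1..9.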
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_8_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(2, __reg_8_1, __reg_8_2, __reg_8_0); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(3, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(4, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(5, __reg_8_1, __reg_8_2, __reg_8_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(6, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, 
__reg_7_0); __STORE(7, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(8, __reg_8_1, __reg_8_2, __reg_8_0); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(9, __reg_8_2, __reg_8_0, __reg_8_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, 
__reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(9, __reg_8_2, __reg_8_0, __reg_8_1); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 19; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); 
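// Bottom tile of kernel0_9 (__c1Id == __side1Num - 1): this streaming loop stops up to three rows before the tile end, and the __h + 0 / __h + 1 / __h + 2 cases that follow drain the register pipeline against the bottom boundary, storing the remaining output rows.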
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, 
__reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 1, __reg_8_2, __reg_8_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, 
__reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __STORE(__h - 1, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h + 0, __reg_8_0, __reg_8_1, __reg_0_2); } } else { for (__h = 19; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, 
__reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1); __h++; } } __global__ void kernel0_8(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __storeValid = __writeValid8; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + 
c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_7_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_7_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_7_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_7_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_7_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_7_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_7_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_7_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); 
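/* Note (comment added for readability; the surrounding code is machine-generated, and the
   AN5D_TYPE macros suggest an AN5D-style temporal-blocking generator): kernel0_8 fuses 8 time
   steps of the 5-point stencil evaluated by __CALCEXPR (weights 0.1873 / 0.1876 / 0.2500 /
   0.1877 / 0.1874), streaming rows along c1 through registers and neighbours along c2 through
   the double-buffered shared array __b_sb. In this first block along c1 (__c1Id == 0), row 0 is
   the fixed domain boundary: it is loaded once into __reg_7_0 and reused as the top input of
   every stage, since boundary rows are never updated. Once rows 0..9 are resident the 8-stage
   pipeline is full, and from here each newly loaded row h releases one fully updated output row
   via __STORE(h - 8). */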
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(1, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(3, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(4, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(5, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(6, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(7, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(8, __reg_7_1, __reg_7_2, __reg_7_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); 
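/* Note (added comment): this else branch handles interior blocks along c1 (__c1Id != 0).
   There is no boundary row to pin, so priming is a straight stream of loads over the block's
   overlapped extent (rows 0..16 here), filling the stage registers one level at a time; the
   first row this block is responsible for, row 8, is stored at the end of the branch, after
   which both branches fall through to the common steady-state loops below. */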
__LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, 
__reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(8, __reg_7_1, __reg_7_2, __reg_7_0); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); 
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); 
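/* Note (added comment): from this point in the drain no further rows are loaded; __reg_0_0
   holds the last loaded row (the block's bottom boundary), and it is reused as the bottom
   input while the remaining pipeline stages are flushed, emitting the outstanding output
   rows without additional global loads. */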
__STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_0_0); } } else { for (__h = 17; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; } } __global__ void kernel0_7(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define 
AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __storeValid = __writeValid7; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_6_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_6_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_6_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_6_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_6_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_6_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_6_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, 
__reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(3, __reg_6_2, __reg_6_0, __reg_6_1); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(5, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(6, __reg_6_2, __reg_6_0, __reg_6_1); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, 
__reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 15; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC6(__reg_6_1, 
__reg_5_0, __reg_5_1, __reg_0_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, 
__reg_0_1); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h + 0, __reg_6_2, __reg_6_0, __reg_0_1); } } else { for (__h = 15; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; } } __global__ void kernel0_6(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE 
__side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __storeValid = __writeValid6; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_5_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_5_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_5_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_5_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_5_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_5_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(2, __reg_5_1, __reg_5_2, __reg_5_0); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(3, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(4, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, 
__reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(5, __reg_5_1, __reg_5_2, __reg_5_0); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(6, __reg_5_2, __reg_5_0, __reg_5_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(6, __reg_5_2, __reg_5_0, __reg_5_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, 
__reg_5_2, __reg_5_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, 
__reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h + 0, __reg_5_0, __reg_5_1, __reg_0_2); } } else { for (__h = 13; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } } __global__ void kernel0_5(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / 
__side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __storeValid = __writeValid5; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_4_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_4_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_4_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_4_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_4_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(1, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(3, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(4, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(5, __reg_4_1, __reg_4_2, __reg_4_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); 
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(5, __reg_4_1, __reg_4_2, __reg_4_0); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 11; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 1, 
__reg_4_0, __reg_4_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h + 0, __reg_4_1, __reg_4_2, __reg_0_0); } } else { for (__h = 11; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; } } __global__ void kernel0_4(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define 
__c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __storeValid = __writeValid4; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_3_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_3_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_3_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_3_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(3, __reg_3_2, __reg_3_0, __reg_3_1); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(4, __reg_3_0, __reg_3_1, __reg_3_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(4, __reg_3_0, __reg_3_1, __reg_3_2); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, 
__reg_3_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h + 0, __reg_3_2, __reg_3_0, __reg_0_1); } } else { for (__h = 9; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, 
__reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } } __global__ void kernel0_3(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __storeValid = __writeValid3; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_2_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_2_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_2_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(2, __reg_2_1, __reg_2_2, __reg_2_0); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_2, __reg_2_0, __reg_2_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_2, __reg_2_0, __reg_2_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); 
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h + 0, __reg_2_0, __reg_2_1, __reg_0_2); } } else { for (__h = 7; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; } } __global__ void kernel0_2(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = 
__updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __storeValid = __writeValid2; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_1_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_1_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(1, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_1, __reg_1_2, __reg_1_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_1, __reg_1_2, __reg_1_0); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h + 0, __reg_1_1, __reg_1_2, __reg_0_0); } } else { for (__h = 5; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; 
__LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } } __global__ void kernel0_1(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __storeValid = __writeValid1; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __STORE(1, __reg_0_0, __reg_0_1, __reg_0_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __STORE(1, __reg_0_0, __reg_0_1, __reg_0_2); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h + 0, __reg_0_2, __reg_0_0, __reg_0_1); } } else { for (__h = 3; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; } }
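/* Added note (inferred from the constants above, not part of the generated
   source): kernel0_5 through kernel0_1 are reduced-depth variants of the same
   stencil, with __side0Len = 5, 4, 3, 2, 1 fused time steps respectively;
   presumably the host driver uses them to finish a remainder of fewer than 13
   time steps. */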
81be3fc0a59d564e21adccbcbc48b6f1505a6406.cu
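/*
 * Descriptive note (added for readability; inferred from the code itself, not
 * part of the generated source): this file appears to be auto-generated,
 * AN5D-style, temporally blocked CUDA code for a radius-1 2D star stencil
 * ("star2d1r", per the kernel header name, with 13-step time tiles and
 * 256-row spatial tiles). Each kernel0_N fuses N applications of
 *
 *     A[(c0+1)%2][c1][c2] = 0.1873f*A[c0%2][c1-1][c2]
 *                         + 0.1876f*A[c0%2][c1][c2-1]
 *                         + 0.2500f*A[c0%2][c1][c2]
 *                         + 0.1877f*A[c0%2][c1][c2+1]
 *                         + 0.1874f*A[c0%2][c1+1][c2]
 *
 * using register-rotated rows plus a double-buffered shared-memory line
 * (__b_sb_double) for the c2-direction neighbours. The __writeValidK and
 * __storeValid predicates shrink the usable width by one halo cell per fused
 * step, which is why kernel0_13 uses __side2Len = 486 (512 - 2*13) while the
 * single-step kernel0_1 uses 510.
 */

/*
 * Hypothetical launch sketch (illustration only; the real generated host
 * driver is not shown here and may differ). It re-derives the grid and block
 * shape from the same constants kernel0_13 uses internally: one thread per
 * overlapped c2 column (__side2LenOl = 512) and one block per
 * __side1Len x __side2Len spatial tile. d_A is assumed to be a device buffer
 * of 2 * dimsize * dimsize floats (ping-pong planes selected by c0 % 2).
 */
__global__ void kernel0_13(float *A, int dimsize, int timestep, int c0); /* defined below */

static void launch_kernel0_13(float *d_A, int dimsize, int timestep, int c0)
{
    const unsigned side0Len = 13, side1Len = 256, side2Len = 486, halo = 1;
    const unsigned side2LenOl = side2Len + 2 * halo * side0Len;   /* 512 threads per block */
    const unsigned c1Len = dimsize - 2, c2Len = dimsize - 2;      /* interior points only  */
    const unsigned side1Num = (c1Len + side1Len - 1) / side1Len;
    const unsigned side2Num = (c2Len + side2Len - 1) / side2Len;
    dim3 block(side2LenOl, 1, 1);
    dim3 grid(side1Num * side2Num, 1, 1);  /* blockIdx.x encodes the (c1 tile, c2 tile) pair */
    kernel0_13<<<grid, block>>>(d_A, dimsize, timestep, c0);
}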
#include "star2d1r-512-13-256_kernel.hu" __device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; } __global__ void kernel0_13(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 13; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 486; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_8_0; float __reg_8_1; float __reg_8_2; float __reg_9_0; float __reg_9_1; float __reg_9_2; float __reg_10_0; float __reg_10_1; float __reg_10_2; float __reg_11_0; float __reg_11_1; float __reg_11_2; float __reg_12_0; float __reg_12_1; float __reg_12_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __writeValid11 = __updateValid && 
__local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11); const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12); const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13); const AN5D_TYPE __storeValid = __writeValid13; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC12(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid12) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_12_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_12_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_12_0, __reg_1_1, 
__reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_12_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_12_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_12_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_12_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_12_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_12_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_12_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_12_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_12_0, 
__reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_12_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(1, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(2, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(3, __reg_12_2, __reg_12_0, __reg_12_1); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(4, __reg_12_0, 
__reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(5, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(6, __reg_12_2, __reg_12_0, __reg_12_1); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(7, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(8, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, 
__reg_11_1, __reg_11_2); __STORE(9, __reg_12_2, __reg_12_0, __reg_12_1); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(10, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(11, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(12, __reg_12_2, __reg_12_0, __reg_12_1); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(13, __reg_12_0, __reg_12_1, __reg_12_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); 
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, 
__reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); 
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(13, __reg_12_0, __reg_12_1, __reg_12_2); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 27; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 13, __reg_12_2, __reg_12_0, __reg_12_1); __h++; 
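/* Third unrolled step of the streaming loop: the pipeline registers rotate modulo 3
   (__reg_k_0 -> __reg_k_1 -> __reg_k_2), so the body is unrolled three times to keep
   register names fixed instead of shuffling values between registers. */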
__LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 13, __reg_12_0, __reg_12_1, __reg_12_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 12, __reg_12_2, __reg_12_0, __reg_12_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 11, __reg_12_0, __reg_12_1, __reg_12_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 10, __reg_12_1, __reg_12_2, __reg_12_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 9, __reg_12_2, __reg_12_0, 
__reg_12_1); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 8, __reg_12_0, __reg_12_1, __reg_12_2); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 7, __reg_12_1, __reg_12_2, __reg_12_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 6, __reg_12_2, __reg_12_0, __reg_12_1); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 5, __reg_12_0, __reg_12_1, __reg_12_2); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 4, __reg_12_1, __reg_12_2, __reg_12_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_0_2); __STORE(__h - 3, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 2, __reg_12_0, __reg_12_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 12, __reg_12_2, __reg_12_0, __reg_12_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, 
__reg_11_0); __STORE(__h - 11, __reg_12_0, __reg_12_1, __reg_12_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 10, __reg_12_1, __reg_12_2, __reg_12_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 9, __reg_12_2, __reg_12_0, __reg_12_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 8, __reg_12_0, __reg_12_1, __reg_12_2); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 7, __reg_12_1, __reg_12_2, __reg_12_0); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 6, __reg_12_2, __reg_12_0, __reg_12_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 5, __reg_12_0, __reg_12_1, __reg_12_2); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 4, __reg_12_1, __reg_12_2, __reg_12_0); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 3, __reg_12_2, __reg_12_0, __reg_12_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_0_0); __STORE(__h - 2, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 1, __reg_12_1, __reg_12_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, 
__reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 12, __reg_12_2, __reg_12_0, __reg_12_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 11, __reg_12_0, __reg_12_1, __reg_12_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 10, __reg_12_1, __reg_12_2, __reg_12_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 9, __reg_12_2, __reg_12_0, __reg_12_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 8, __reg_12_0, __reg_12_1, __reg_12_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, 
__reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 7, __reg_12_1, __reg_12_2, __reg_12_0); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 6, __reg_12_2, __reg_12_0, __reg_12_1); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 5, __reg_12_0, __reg_12_1, __reg_12_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 4, __reg_12_1, __reg_12_2, __reg_12_0); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 3, __reg_12_2, __reg_12_0, __reg_12_1); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 2, __reg_12_0, __reg_12_1, __reg_12_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_0_1); __STORE(__h - 1, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h + 0, __reg_12_2, __reg_12_0, __reg_0_1); } } else { for (__h = 27; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 13, __reg_12_2, __reg_12_0, __reg_12_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); 
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 13, __reg_12_0, __reg_12_1, __reg_12_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 13, __reg_12_2, __reg_12_0, __reg_12_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 13, __reg_12_0, __reg_12_1, __reg_12_2); __h++; } } __global__ void kernel0_12(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 12; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 488; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * 
__OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_8_0; float __reg_8_1; float __reg_8_2; float __reg_9_0; float __reg_9_1; float __reg_9_2; float __reg_10_0; float __reg_10_1; float __reg_10_2; float __reg_11_0; float __reg_11_1; float __reg_11_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11); const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12); const AN5D_TYPE __storeValid = __writeValid12; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) 
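/* __CALCEXPR applies the weighted 5-point stencil update: the centre value and the
   c1-direction neighbours come from the register triple (__a, __b, __c), while the
   c2-direction neighbours are read from the shared-memory row buffer __b_sb, which
   __DB_SWITCH double-buffers between iterations. */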
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_11_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_11_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_11_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_11_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_11_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_11_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_11_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, 
__reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_11_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_11_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_11_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_11_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_11_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(1, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, 
__reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(2, __reg_11_1, __reg_11_2, __reg_11_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(3, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(4, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(5, __reg_11_1, __reg_11_2, __reg_11_0); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(6, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(7, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); 
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(8, __reg_11_1, __reg_11_2, __reg_11_0); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(9, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(10, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(11, __reg_11_1, __reg_11_2, __reg_11_0); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(12, __reg_11_2, __reg_11_0, __reg_11_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, 
__reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); 
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); 
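/* Annotation added for readability: pipeline fill for an interior tile of the 12-step kernel; rows keep being loaded and pushed through __CALC1..__CALC11, and the tile's first output row is only written by the __STORE(12, ...) a little further below, once all fused time levels are primed. */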
__CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(12, __reg_11_2, __reg_11_0, __reg_11_1); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 12, __reg_11_1, __reg_11_2, __reg_11_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 12, __reg_11_2, __reg_11_0, __reg_11_1); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, 
__reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 11, __reg_11_1, __reg_11_2, __reg_11_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 10, __reg_11_2, __reg_11_0, __reg_11_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 9, __reg_11_0, __reg_11_1, __reg_11_2); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 8, __reg_11_1, __reg_11_2, __reg_11_0); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 7, __reg_11_2, __reg_11_0, __reg_11_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 6, __reg_11_0, __reg_11_1, __reg_11_2); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 5, __reg_11_1, __reg_11_2, __reg_11_0); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 4, __reg_11_2, __reg_11_0, __reg_11_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0); __STORE(__h - 3, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 2, __reg_11_1, __reg_11_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, 
__reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 11, __reg_11_1, __reg_11_2, __reg_11_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 10, __reg_11_2, __reg_11_0, __reg_11_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 9, __reg_11_0, __reg_11_1, __reg_11_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 8, __reg_11_1, __reg_11_2, __reg_11_0); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 7, __reg_11_2, __reg_11_0, __reg_11_1); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 6, __reg_11_0, __reg_11_1, __reg_11_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 5, __reg_11_1, __reg_11_2, __reg_11_0); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 4, __reg_11_2, __reg_11_0, __reg_11_1); 
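/* Annotation added: from this point the epilogue branch loads no further rows; the last fetched row (__reg_0_1) is reused while the remaining pipeline stages drain, and each drained stage emits one more output row via __STORE. */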
__CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 3, __reg_11_0, __reg_11_1, __reg_11_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1); __STORE(__h - 2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 1, __reg_11_2, __reg_11_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 11, __reg_11_1, __reg_11_2, __reg_11_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 10, __reg_11_2, __reg_11_0, __reg_11_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 9, __reg_11_0, __reg_11_1, __reg_11_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 8, __reg_11_1, __reg_11_2, __reg_11_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, 
__reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 7, __reg_11_2, __reg_11_0, __reg_11_1); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 6, __reg_11_0, __reg_11_1, __reg_11_2); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 5, __reg_11_1, __reg_11_2, __reg_11_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 4, __reg_11_2, __reg_11_0, __reg_11_1); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 3, __reg_11_0, __reg_11_1, __reg_11_2); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2); __STORE(__h - 1, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h + 0, __reg_11_0, __reg_11_1, __reg_0_2); } } else { for (__h = 25; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 12, __reg_11_1, __reg_11_2, __reg_11_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, 
__reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 12, __reg_11_2, __reg_11_0, __reg_11_1); __h++; }
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 12, __reg_11_1, __reg_11_2, __reg_11_0); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 12, __reg_11_2, __reg_11_0, __reg_11_1); __h++; } }

__global__ void kernel0_11(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 11; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 490; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
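/* Annotation added: kernel0_11 performs the same register-pipelined stencil sweep as the kernel above, but fuses 11 time steps per pass (__side0Len = 11, i.e. __CALC1..__CALC10 plus the final __STORE). Each thread block covers one (c1, c2) tile: __c1Id selects the tile along the streamed c1 dimension, and __c2 below is the global column owned by this thread, shifted left by the overlap __OlLen2 so that halo columns are loaded as well. */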
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0; float __reg_0_1; float __reg_0_2;
float __reg_1_0; float __reg_1_1; float __reg_1_2;
float __reg_2_0; float __reg_2_1; float __reg_2_2;
float __reg_3_0; float __reg_3_1; float __reg_3_2;
float __reg_4_0; float __reg_4_1; float __reg_4_2;
float __reg_5_0; float __reg_5_1; float __reg_5_2;
float __reg_6_0; float __reg_6_1; float __reg_6_2;
float __reg_7_0; float __reg_7_1; float __reg_7_2;
float __reg_8_0; float __reg_8_1; float __reg_8_2;
float __reg_9_0; float __reg_9_1; float __reg_9_2;
float __reg_10_0; float __reg_10_1; float __reg_10_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
const AN5D_TYPE __storeValid = __writeValid11;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0) { __LOAD(__reg_10_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_10_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_10_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_10_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_10_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_10_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_10_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
__CALC7(__reg_7_1, __reg_10_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_10_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_10_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_10_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(1, __reg_10_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(2, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(3, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); 
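/* Annotation added: in this top-boundary tile (__c1Id == 0), __reg_10_0 holds the untouched boundary row 0 and serves as the upper neighbour while each pipeline stage ramps up, so the tile's first __STORE, issued above, already writes output row 1. */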
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(4, __reg_10_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(5, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(6, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(7, __reg_10_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(8, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(9, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, 
__reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(10, __reg_10_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(11, __reg_10_1, __reg_10_2, __reg_10_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, 
__reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, 
__reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(11, __reg_10_1, __reg_10_2, __reg_10_0); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 23; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 11, __reg_10_0, __reg_10_1, __reg_10_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 11, __reg_10_1, __reg_10_2, __reg_10_0); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, 
__reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 10, __reg_10_0, __reg_10_1, __reg_10_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 9, __reg_10_1, __reg_10_2, __reg_10_0); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 8, __reg_10_2, __reg_10_0, __reg_10_1); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 7, __reg_10_0, __reg_10_1, __reg_10_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 6, __reg_10_1, __reg_10_2, __reg_10_0); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 5, __reg_10_2, __reg_10_0, __reg_10_1); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 4, __reg_10_0, __reg_10_1, __reg_10_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1); __STORE(__h - 3, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 2, __reg_10_2, __reg_10_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, 
__reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 10, __reg_10_0, __reg_10_1, __reg_10_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 9, __reg_10_1, __reg_10_2, __reg_10_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 8, __reg_10_2, __reg_10_0, __reg_10_1); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 7, __reg_10_0, __reg_10_1, __reg_10_2); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 6, __reg_10_1, __reg_10_2, __reg_10_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 5, __reg_10_2, __reg_10_0, __reg_10_1); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 4, __reg_10_0, __reg_10_1, __reg_10_2); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 3, __reg_10_1, __reg_10_2, __reg_10_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2); __STORE(__h - 2, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 1, __reg_10_0, __reg_10_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, 
__reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 10, __reg_10_0, __reg_10_1, __reg_10_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 9, __reg_10_1, __reg_10_2, __reg_10_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 8, __reg_10_2, __reg_10_0, __reg_10_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 7, __reg_10_0, __reg_10_1, __reg_10_2); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 6, __reg_10_1, __reg_10_2, __reg_10_0); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 5, __reg_10_2, __reg_10_0, __reg_10_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 4, __reg_10_0, __reg_10_1, __reg_10_2); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 3, __reg_10_1, __reg_10_2, __reg_10_0); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 2, __reg_10_2, __reg_10_0, __reg_10_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0); __STORE(__h - 1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h + 0, __reg_10_1, __reg_10_2, __reg_0_0); } } else { for (__h = 23; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, 
__reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 11, __reg_10_0, __reg_10_1, __reg_10_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 11, __reg_10_1, __reg_10_2, __reg_10_0); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 11, __reg_10_0, __reg_10_1, __reg_10_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 11, __reg_10_1, 
__reg_10_2, __reg_10_0); __h++; } } __global__ void kernel0_10(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 10; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 492; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_8_0; float __reg_8_1; float __reg_8_2; float __reg_9_0; float __reg_9_1; float __reg_9_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __storeValid = __writeValid10; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST 
(A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_9_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_9_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_9_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_9_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_9_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_9_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_9_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, 
__reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_9_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_9_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_9_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(3, __reg_9_2, __reg_9_0, __reg_9_1); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(4, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); 
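/* Note: this __c1Id == 0 branch of kernel0_10 handles the tile at the top boundary.
   Row 0 is loaded once into __reg_9_0 and reused as the boundary operand each time a
   deeper __CALCn stage is primed for the first time; output rows begin with __STORE(1, ...). */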
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(5, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(6, __reg_9_2, __reg_9_0, __reg_9_1); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(7, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(8, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(9, __reg_9_2, __reg_9_0, __reg_9_1); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(10, __reg_9_0, __reg_9_1, __reg_9_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); 
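/* Note: the else branch entered just above handles tiles with __c1Id != 0. It only primes
   the pipeline: rows 0..20 of the tile (which appear to fall in the halo overlap with the
   tile above) are loaded and pushed through __CALC1..__CALC9, and the single __STORE(10, ...)
   at the end of the prologue emits the first completed row. */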
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, 
__reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(10, __reg_9_0, __reg_9_1, __reg_9_2); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, 
__reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, 
__reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, 
__reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h + 0, __reg_9_2, __reg_9_0, __reg_0_1); } } else { for (__h = 21; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); 
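/* Note: steady-state path of kernel0_10 for tiles that are not the last along c1.
   Each unrolled step of this loop loads one new row, advances the nine explicit
   __CALCn stages, and writes the finished row ten positions behind the load cursor
   via __STORE(__h - 10, ...). */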
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2); __h++; } } __global__ void kernel0_9(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 9; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 494; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float 
__reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_8_0; float __reg_8_1; float __reg_8_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __storeValid = __writeValid9; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_8_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_8_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_8_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_8_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_8_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_8_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_8_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_8_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); 
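/* Note: kernel0_9 mirrors kernel0_10 with one fewer fused time step (__side0Len = 9):
   eight explicit __CALCn stages plus the __STORE stage (__storeValid = __writeValid9),
   a 9-row halo per tile side, and stores that trail the load cursor by nine rows
   (__STORE(__h - 9, ...)). */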
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_8_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(2, __reg_8_1, __reg_8_2, __reg_8_0); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(3, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(4, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(5, __reg_8_1, __reg_8_2, __reg_8_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(6, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, 
__reg_7_0); __STORE(7, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(8, __reg_8_1, __reg_8_2, __reg_8_0); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(9, __reg_8_2, __reg_8_0, __reg_8_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, 
__reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(9, __reg_8_2, __reg_8_0, __reg_8_1); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 19; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); 
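/* Note (inside the bottom-tile loop): for the last tile along c1 (__c1Id == __side1Num - 1),
   this loop stops short of the tile end, and the __h + 0/1/2 == ... cases that follow drain
   the partially filled pipeline, reusing the last loaded row as the boundary operand while
   the remaining rows, from __STORE(__h - 9, ...) down to the final row of the tile, are flushed. */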
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, 
__reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 1, __reg_8_2, __reg_8_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, 
__reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __STORE(__h - 1, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h + 0, __reg_8_0, __reg_8_1, __reg_0_2); } } else { for (__h = 19; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, 
__reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1); __h++;
}
}

// Variant of the same 2D 5-point stencil that fuses 8 time steps per thread block (__side0Len = 8);
// __reg_1_* .. __reg_7_* carry the partially updated rows of the register pipeline and __b_sb_double is the double-buffered shared row.
__global__ void kernel0_8(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2;
__shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __storeValid = __writeValid8;
AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_7_0, 0); __LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_7_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_7_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_7_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_7_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_7_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_7_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_7_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9);
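// Pipeline is now full: from row 9 onward each newly loaded row passes through __CALC1..__CALC7 and __STORE emits one finished output row.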
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(1, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(3, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(4, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(5, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(6, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(7, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(8, __reg_7_1, __reg_7_2, __reg_7_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); 
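// Interior tile (__c1Id != 0): the pipeline is primed from plain halo-row loads; the top-boundary reuse of __reg_7_0 in the branch above does not apply here.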
__LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, 
__reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(8, __reg_7_1, __reg_7_2, __reg_7_0); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); 
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); 
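// Tile epilogue: no further rows are loaded; the last row read is reused as the bottom neighbour while the remaining pipeline stages drain and the final rows are stored.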
__STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_0_0);
}
}
else
{
for (__h = 17; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++;
__LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++;
__LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++;
}
}

// Same stencil, fusing 7 time steps per block (__side0Len = 7); structure mirrors kernel0_8 with one fewer pipeline stage.
__global__ void kernel0_7(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2;
__shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __storeValid = __writeValid7;
AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_6_0, 0); __LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_6_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_6_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_6_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_6_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_6_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_6_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(1, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(2, __reg_6_1, __reg_6_2, __reg_6_0);
__LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0,
__reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(3, __reg_6_2, __reg_6_0, __reg_6_1); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(5, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(6, __reg_6_2, __reg_6_0, __reg_6_1); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, 
__reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 15; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC6(__reg_6_1, 
__reg_5_0, __reg_5_1, __reg_0_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, 
__reg_0_1); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h + 0, __reg_6_2, __reg_6_0, __reg_0_1);
}
}
else
{
for (__h = 15; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++;
__LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++;
__LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++;
}
}

// Same stencil, fusing 6 time steps per block (__side0Len = 6).
__global__ void kernel0_6(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2;
__shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __storeValid = __writeValid6;
AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_5_0, 0); __LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_5_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_5_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_5_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_5_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_5_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(1, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(2, __reg_5_1, __reg_5_2, __reg_5_0);
__LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(3, __reg_5_2, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1,
__reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(5, __reg_5_1, __reg_5_2, __reg_5_0); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(6, __reg_5_2, __reg_5_0, __reg_5_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(6, __reg_5_2, __reg_5_0, __reg_5_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, 
__reg_5_2, __reg_5_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, 
__reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h + 0, __reg_5_0, __reg_5_1, __reg_0_2); } } else { for (__h = 13; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } } __global__ void kernel0_5(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / 
__side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __storeValid = __writeValid5; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_4_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_4_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_4_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_4_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_4_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(1, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(3, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(4, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(5, __reg_4_1, __reg_4_2, __reg_4_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); 
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(5, __reg_4_1, __reg_4_2, __reg_4_0); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 11; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 1, 
__reg_4_0, __reg_4_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h + 0, __reg_4_1, __reg_4_2, __reg_0_0); } } else { for (__h = 11; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; } } __global__ void kernel0_4(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define 
__c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __storeValid = __writeValid4; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_3_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_3_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_3_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_3_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(3, __reg_3_2, __reg_3_0, __reg_3_1); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(4, __reg_3_0, __reg_3_1, __reg_3_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(4, __reg_3_0, __reg_3_1, __reg_3_2); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, 
__reg_3_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h + 0, __reg_3_2, __reg_3_0, __reg_0_1); } } else { for (__h = 9; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, 
__reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } } __global__ void kernel0_3(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __storeValid = __writeValid3; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_2_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_2_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_2_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(2, __reg_2_1, __reg_2_2, __reg_2_0); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_2, __reg_2_0, __reg_2_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_2, __reg_2_0, __reg_2_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); 
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h + 0, __reg_2_0, __reg_2_1, __reg_0_2); } } else { for (__h = 7; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; } } __global__ void kernel0_2(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = 
__updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __storeValid = __writeValid2; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_1_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_1_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(1, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_1, __reg_1_2, __reg_1_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_1, __reg_1_2, __reg_1_0); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h + 0, __reg_1_1, __reg_1_2, __reg_0_0); } } else { for (__h = 5; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; 
__LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } } __global__ void kernel0_1(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __storeValid = __writeValid1; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __STORE(1, __reg_0_0, __reg_0_1, __reg_0_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __STORE(1, __reg_0_0, __reg_0_1, __reg_0_2); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h + 0, __reg_0_2, __reg_0_0, __reg_0_1); } } else { for (__h = 3; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; } }
6f0911b3692d5b213733a50d0c252cd2e5581dc4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

#include <string>       // std::string (in struct of params.hpp)
#include <vector>       // std::vector (for params.hpp)
#include <rocblas.h>    // hipblasHandle_t (in solver_gpu.hu)

#include "../params.hpp"    // struct params
#include "./solver_gpu.hu"  // struct device_pointers

// simple phase oscillator model; unwrapped; wrapping in analysis after simulation
__global__ void model_phase_oscillator(Real1 *c, Real1 *cnew, int len, Real dt){
    Real w=6.28318;     // angular frequency; period = 2pi/w = 1.0

    int i = threadIdx.x + blockDim.x*blockIdx.x;
    if(i<len){
        cnew[i].x = c[i].x + dt*( w );
    }
}

// simple phase oscillator model; unwrapped; wrapping in analysis after simulation; with heterogeneity
__global__ void model_phase_oscillator_whet(Real1 *c, Real1 *cnew, int len, Real dt, Real *het){
    //~ Real w=6.28318;     // angular frequency; period = 2pi/w = 1.0

    int i = threadIdx.x + blockDim.x*blockIdx.x;
    if(i<len){
        cnew[i].x = c[i].x + dt*( het[i] );
    }
}

// zbke2k - modified BZ model
// must be double, not float
__global__ void model_zbke2k(Real2 *c, Real2 *cnew, int len, Real dt){
    Real ooeps1=9.090909090909091;  // 1.0/0.11
    //~ Real eps2=1.7e-5;
    //~ Real gamma=1.2;
    Real gammaEps2=2.04e-5;
    //~ Real eps3=1.6e-3;
    Real eps31=1.0016;
    Real alpha=0.1;
    Real beta=1.7e-5;
    Real mu=2.4e-4;
    Real q=0.7;
    Real phi=5.25e-4;
    Real uss=0.0;
    Real temp=0.0;

    int i = threadIdx.x + blockDim.x*blockIdx.x;
    if(i<len){
        uss=1.0/(4.0*gammaEps2) * (-(1.0-c[i].y) + sqrt(1.0 + fma(c[i].y,c[i].y,-2.0*c[i].y) + 16.0*gammaEps2*c[i].x));
        temp=alpha*c[i].y/(eps31-c[i].y);
        cnew[i].x=c[i].x+dt*( ooeps1*(phi-c[i].x*c[i].x-c[i].x+gammaEps2*uss*uss+uss*(1.0-c[i].y)+(mu-c[i].x)/(mu+c[i].x)*(q*temp+beta)) );    // u
        cnew[i].y=c[i].y+dt*(2.0*phi + uss*(1.0-c[i].y) - temp);    // v
    }
}

// zbke2k - modified BZ model, heterogenous q
// must be double, not float
__global__ void model_zbke2k_qhet(Real2 *c, Real2 *cnew, int len, Real dt, Real *het){
    Real ooeps1=9.090909090909091;  // 1.0/0.11
    //~ Real eps2=1.7e-5;
    //~ Real gamma=1.2;
    Real gammaEps2=2.04e-5;
    //~ Real eps3=1.6e-3;
    Real eps31=1.0016;
    Real alpha=0.1;
    Real beta=1.7e-5;
    Real mu=2.4e-4;
    //~ Real q=0.7;
    Real phi=5.25e-4;
    Real uss=0.0;
    Real temp=0.0;

    int i = threadIdx.x + blockDim.x*blockIdx.x;
    if(i<len){
        uss=1.0/(4.0*gammaEps2) * (-(1.0-c[i].y) + sqrt(1.0 + fma(c[i].y,c[i].y,-2.0*c[i].y) + 16.0*gammaEps2*c[i].x));
        temp=alpha*c[i].y/(eps31-c[i].y);
        cnew[i].x=c[i].x+dt*( ooeps1*(phi-c[i].x*c[i].x-c[i].x+gammaEps2*uss*uss+uss*(1.0-c[i].y)+(mu-c[i].x)/(mu+c[i].x)*(het[i]*temp+beta)) );    // u, het
        cnew[i].y=c[i].y+dt*(2.0*phi + uss*(1.0-c[i].y) - temp);    // v
    }
}

void reaction(device_pointers *d, params &p, streams *s){
    int warpsize=32;
    dim3 nblocks((p.n-1)/warpsize+1);
    dim3 nthreads(warpsize);

    switch(p.reactionModel){
        case 16:hipLaunchKernelGGL(( model_phase_oscillator), dim3(nblocks),dim3(nthreads),0,s->stream1, (Real1 *)d->c,(Real1 *)d->cnew,p.n,p.dt); break;
        case 1601:hipLaunchKernelGGL(( model_phase_oscillator_whet), dim3(nblocks),dim3(nthreads),0,s->stream1, (Real1 *)d->c,(Real1 *)d->cnew,p.n,p.dt,d->het); break;
        case 24:hipLaunchKernelGGL(( model_zbke2k), dim3(nblocks),dim3(nthreads),0,s->stream1, (Real2 *)d->c,(Real2 *)d->cnew,p.n,p.dt); break;
        case 2401:hipLaunchKernelGGL(( model_zbke2k_qhet), dim3(nblocks),dim3(nthreads),0,s->stream1, (Real2 *)d->c,(Real2 *)d->cnew,p.n,p.dt,d->het); break;
        default: printf("chosen reactionModel (%d) is not implemented! Program Abort!",p.reactionModel); exit(EXIT_FAILURE); break;
    }

    checkCUDAError("reaction()",__LINE__);
}
6f0911b3692d5b213733a50d0c252cd2e5581dc4.cu
#include <string> // std::string (in struct of params.hpp) #include <vector> // std::vector (for params.hpp) #include <cublas_v2.h> // cublasHandle_t (in solver_gpu.hu) #include "../params.hpp" // struct params #include "./solver_gpu.hu" // struct device_pointers // simple phase oscillator model; unwrapped; wrapping in analysis after simulation __global__ void model_phase_oscillator(Real1 *c, Real1 *cnew, int len, Real dt){ Real w=6.28318; // angular frequency; period = 2pi/w = 1.0 int i = threadIdx.x + blockDim.x*blockIdx.x; if(i<len){ cnew[i].x = c[i].x + dt*( w ); } } // simple phase oscillator model; unwrapped; wrapping in analysis after simulation; with heterogeneity __global__ void model_phase_oscillator_whet(Real1 *c, Real1 *cnew, int len, Real dt, Real *het){ //~ Real w=6.28318; // angular frequency; period = 2pi/w = 1.0 int i = threadIdx.x + blockDim.x*blockIdx.x; if(i<len){ cnew[i].x = c[i].x + dt*( het[i] ); } } // zbke2k - modified BZ model // must be double, not float __global__ void model_zbke2k(Real2 *c, Real2 *cnew, int len, Real dt){ Real ooeps1=9.090909090909091; // 1.0/0.11 //~ Real eps2=1.7e-5; //~ Real gamma=1.2; Real gammaEps2=2.04e-5; //~ Real eps3=1.6e-3; Real eps31=1.0016; Real alpha=0.1; Real beta=1.7e-5; Real mu=2.4e-4; Real q=0.7; Real phi=5.25e-4; Real uss=0.0; Real temp=0.0; int i = threadIdx.x + blockDim.x*blockIdx.x; if(i<len){ uss=1.0/(4.0*gammaEps2) * (-(1.0-c[i].y) + sqrt(1.0 + fma(c[i].y,c[i].y,-2.0*c[i].y) + 16.0*gammaEps2*c[i].x)); temp=alpha*c[i].y/(eps31-c[i].y); cnew[i].x=c[i].x+dt*( ooeps1*(phi-c[i].x*c[i].x-c[i].x+gammaEps2*uss*uss+uss*(1.0-c[i].y)+(mu-c[i].x)/(mu+c[i].x)*(q*temp+beta)) ); // u cnew[i].y=c[i].y+dt*(2.0*phi + uss*(1.0-c[i].y) - temp); // v } } // zbke2k - modified BZ model, heterogenous q // must be double, not float __global__ void model_zbke2k_qhet(Real2 *c, Real2 *cnew, int len, Real dt, Real *het){ Real ooeps1=9.090909090909091; // 1.0/0.11 //~ Real eps2=1.7e-5; //~ Real gamma=1.2; Real gammaEps2=2.04e-5; //~ Real eps3=1.6e-3; Real eps31=1.0016; Real alpha=0.1; Real beta=1.7e-5; Real mu=2.4e-4; //~ Real q=0.7; Real phi=5.25e-4; Real uss=0.0; Real temp=0.0; int i = threadIdx.x + blockDim.x*blockIdx.x; if(i<len){ uss=1.0/(4.0*gammaEps2) * (-(1.0-c[i].y) + sqrt(1.0 + fma(c[i].y,c[i].y,-2.0*c[i].y) + 16.0*gammaEps2*c[i].x)); temp=alpha*c[i].y/(eps31-c[i].y); cnew[i].x=c[i].x+dt*( ooeps1*(phi-c[i].x*c[i].x-c[i].x+gammaEps2*uss*uss+uss*(1.0-c[i].y)+(mu-c[i].x)/(mu+c[i].x)*(het[i]*temp+beta)) ); // u, het cnew[i].y=c[i].y+dt*(2.0*phi + uss*(1.0-c[i].y) - temp); // v } } void reaction(device_pointers *d, params &p, streams *s){ int warpsize=32; dim3 nblocks((p.n-1)/warpsize+1); dim3 nthreads(warpsize); switch(p.reactionModel){ case 16: model_phase_oscillator<<<nblocks,nthreads,0,s->stream1>>>((Real1 *)d->c,(Real1 *)d->cnew,p.n,p.dt); break; case 1601: model_phase_oscillator_whet<<<nblocks,nthreads,0,s->stream1>>>((Real1 *)d->c,(Real1 *)d->cnew,p.n,p.dt,d->het); break; case 24: model_zbke2k<<<nblocks,nthreads,0,s->stream1>>>((Real2 *)d->c,(Real2 *)d->cnew,p.n,p.dt); break; case 2401: model_zbke2k_qhet<<<nblocks,nthreads,0,s->stream1>>>((Real2 *)d->c,(Real2 *)d->cnew,p.n,p.dt,d->het); break; default: printf("chosen reactionModel (%d) is not implemented! Program Abort!",p.reactionModel); exit(EXIT_FAILURE); break; } checkCUDAError("reaction()",__LINE__); }
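// Illustration: a minimal host-side time loop that could drive reaction() above. This is a
// sketch, not part of the original file; it assumes d->c and d->cnew are plain Real* device
// buffers and that nsteps is supplied by the caller. The swap makes each freshly written state
// the input of the next forward-Euler step.
static void integrate_sketch(device_pointers *d, params &p, streams *s, int nsteps)
{
    for (int step = 0; step < nsteps; ++step) {
        reaction(d, p, s);                 // one explicit Euler step: cnew = c + dt*f(c)
        Real *tmp = d->c;                  // double-buffer swap
        d->c = d->cnew;
        d->cnew = tmp;
    }
    cudaStreamSynchronize(s->stream1);     // make the final state visible to analysis
}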
b7ce468f7d8dcaa59fc63c8fde50183ecdf69289.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 *Alfredo Luque
 *CUDA 3-layer MLP
 */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>            // time() for seeding rand()
#include <nn.h>
#include <array_reduction.h>
#include <csv_v2.h>

using namespace std;

// allocate and fill a host-side weight array with small random values
float* initW(unsigned int nW)
{
    float* weights = new float[nW];
    srand(time(0));
    for (unsigned int i = 0; i < nW; i++)
    {
        weights[i] = ((float)((rand() % 1001) - 1000)) / 1000.0f;
    }
    return weights;
}

// two-pass reduction: per-block partial sums, then a single-block pass for the total
void sumArray(float* errIn, float* partsum, float* error)
{
    hipLaunchKernelGGL((sumreduce), dim3(N / blockSize), dim3(blockSize), 0, 0, errIn, partsum, N); // reduces into redBlocks floats
    hipLaunchKernelGGL((sumreduce), dim3(1), dim3(blockSize), 0, 0, partsum, error, N);             // reduces to final sum
}

void sumVec(float* a, float* b, float* c) // a + b = c
{
    // fill device arrays, run the add kernel, copy the result back
    hipMemcpy(dev_a, a, N * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(dev_b, b, N * sizeof(float), hipMemcpyHostToDevice);
    hipLaunchKernelGGL((add), dim3(N), dim3(1), 0, 0, dev_a, dev_b, dev_c);
    hipMemcpy(c, dev_c, N * sizeof(float), hipMemcpyDeviceToHost);
}

// read training data from CSV and push it to the device buffer
float* loadInputs(float* dev_array, bool inputfile)
{
    // just rename the data files inputs.csv / outputs.csv
    io::CSVReader<3> in(inputfile ? "inputs.csv" : "outputs.csv");
    in.read_header(io::ignore_extra_column, "Weather", "OnTime", "Delay/Out");
    std::string vendor;
    int size;
    double speed;
    while (in.read_row(vendor, size, speed))
    {
        for (int i = 0; i < IN; i++)
        {
            hipMemcpy(row[i], dev_in, N * sizeof(float), hipMemcpyHostToDevice);
        }
    }
    return dev_array;
}

int main(void)
{
    // load training data
    // finalize block dimensions

    // initialize weights: one bias per hidden and output layer
    unsigned int nW = (unsigned int)((IN + 1) * HN * (LAYERS - 1));
    float* wSeeds = initW(nW);
    float* dev_w;
    hipMalloc((void**)&dev_w, sizeof(float) * nW);
    hipMemcpy(dev_w, wSeeds, sizeof(float) * nW, hipMemcpyHostToDevice); // seed weights onto the device

    // generate device error arrays
    float* dev_errIn;
    float* dev_errPartSum;
    float* dev_error;
    hipMalloc((void**)&dev_errIn, sizeof(float) * N);
    hipMalloc((void**)&dev_errPartSum, sizeof(float) * N);
    hipMalloc((void**)&dev_error, sizeof(float));

    // generate device gross update array
    float* dev_grossUp;
    hipMalloc((void**)&dev_grossUp, N * sizeof(float) * nW);

    // generate previous-update array (momentum)
    float* dev_prevUp;
    hipMalloc((void**)&dev_prevUp, sizeof(float) * nW);

    // training data: inputs and targets
    float* dev_tdi;
    hipMalloc((void**)&dev_tdi, sizeof(float) * IN * N);
    float* dev_tdo;
    hipMalloc((void**)&dev_tdo, sizeof(float) * ON * N); // usually just size N*sizeof(float) since I have one output neuron

    loadInputs(dev_tdi, true);
    loadInputs(dev_tdo, false);

    // iterate backpropagation!
    for (int i = 0; i < EPOCHS; i++)
    {
        hipLaunchKernelGGL((kernBackProp), dim3(N), dim3(HN), 0, 0, dev_tdi, dev_tdo, dev_w, dev_grossUp, dev_prevUp, dev_errIn);
        sumArray(dev_errIn, dev_errPartSum, dev_error); // we can output this somewhere to create convergence charts
        for (int j = 0; j < N; j++)
        {
            sumVec(&grossUp[i], &grossUp[i + 1], &dev_weights); // compute net changes
        }
    }
    hipMemcpy(wSeeds, dev_w, sizeof(float) * nW, hipMemcpyDeviceToHost); // retrieve weights (that's what we're after!)
    return 0;
}
b7ce468f7d8dcaa59fc63c8fde50183ecdf69289.cu
/*
 *Alfredo Luque
 *CUDA 3-layer MLP
 */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>            // time() for seeding rand()
#include <nn.h>
#include <array_reduction.h>
#include <csv_v2.h>

using namespace std;

// allocate and fill a host-side weight array with small random values
float* initW(unsigned int nW)
{
    float* weights = new float[nW];
    srand(time(0));
    for (unsigned int i = 0; i < nW; i++)
    {
        weights[i] = ((float)((rand() % 1001) - 1000)) / 1000.0f;
    }
    return weights;
}

// two-pass reduction: per-block partial sums, then a single-block pass for the total
void sumArray(float* errIn, float* partsum, float* error)
{
    sumreduce<<<N / blockSize, blockSize>>>(errIn, partsum, N); // reduces into redBlocks floats
    sumreduce<<<1, blockSize>>>(partsum, error, N);             // reduces to final sum
}

void sumVec(float* a, float* b, float* c) // a + b = c
{
    // fill device arrays, run the add kernel, copy the result back
    cudaMemcpy(dev_a, a, N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N * sizeof(float), cudaMemcpyHostToDevice);
    add<<<N, 1>>>(dev_a, dev_b, dev_c);
    cudaMemcpy(c, dev_c, N * sizeof(float), cudaMemcpyDeviceToHost);
}

// read training data from CSV and push it to the device buffer
float* loadInputs(float* dev_array, bool inputfile)
{
    // just rename the data files inputs.csv / outputs.csv
    io::CSVReader<3> in(inputfile ? "inputs.csv" : "outputs.csv");
    in.read_header(io::ignore_extra_column, "Weather", "OnTime", "Delay/Out");
    std::string vendor;
    int size;
    double speed;
    while (in.read_row(vendor, size, speed))
    {
        for (int i = 0; i < IN; i++)
        {
            cudaMemcpy(row[i], dev_in, N * sizeof(float), cudaMemcpyHostToDevice);
        }
    }
    return dev_array;
}

int main(void)
{
    // load training data
    // finalize block dimensions

    // initialize weights: one bias per hidden and output layer
    unsigned int nW = (unsigned int)((IN + 1) * HN * (LAYERS - 1));
    float* wSeeds = initW(nW);
    float* dev_w;
    cudaMalloc((void**)&dev_w, sizeof(float) * nW);
    cudaMemcpy(dev_w, wSeeds, sizeof(float) * nW, cudaMemcpyHostToDevice); // seed weights onto the device

    // generate device error arrays
    float* dev_errIn;
    float* dev_errPartSum;
    float* dev_error;
    cudaMalloc((void**)&dev_errIn, sizeof(float) * N);
    cudaMalloc((void**)&dev_errPartSum, sizeof(float) * N);
    cudaMalloc((void**)&dev_error, sizeof(float));

    // generate device gross update array
    float* dev_grossUp;
    cudaMalloc((void**)&dev_grossUp, N * sizeof(float) * nW);

    // generate previous-update array (momentum)
    float* dev_prevUp;
    cudaMalloc((void**)&dev_prevUp, sizeof(float) * nW);

    // training data: inputs and targets
    float* dev_tdi;
    cudaMalloc((void**)&dev_tdi, sizeof(float) * IN * N);
    float* dev_tdo;
    cudaMalloc((void**)&dev_tdo, sizeof(float) * ON * N); // usually just size N*sizeof(float) since I have one output neuron

    loadInputs(dev_tdi, true);
    loadInputs(dev_tdo, false);

    // iterate backpropagation!
    for (int i = 0; i < EPOCHS; i++)
    {
        kernBackProp<<<N, HN>>>(dev_tdi, dev_tdo, dev_w, dev_grossUp, dev_prevUp, dev_errIn);
        sumArray(dev_errIn, dev_errPartSum, dev_error); // we can output this somewhere to create convergence charts
        for (int j = 0; j < N; j++)
        {
            sumVec(&grossUp[i], &grossUp[i + 1], &dev_weights); // compute net changes
        }
    }
    cudaMemcpy(wSeeds, dev_w, sizeof(float) * nW, cudaMemcpyDeviceToHost); // retrieve weights (that's what we're after!)
    return 0;
}
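// Illustration: sumreduce() and add() come from array_reduction.h / nn.h, which are not shown
// here. Below is a minimal, hypothetical sketch of the kind of shared-memory block reduction
// sumArray() relies on: each block writes one partial sum, and a second launch with a single
// block folds the partials into the total. It assumes blockDim.x is a power of two <= 256.
__global__ void sumreduce_sketch(const float* in, float* out, int n)
{
    __shared__ float sdata[256];
    int tid = threadIdx.x;
    int i = blockIdx.x * blockDim.x + tid;
    sdata[tid] = (i < n) ? in[i] : 0.0f;               // load, padding out-of-range with 0
    __syncthreads();
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {     // tree reduction in shared memory
        if (tid < s)
            sdata[tid] += sdata[tid + s];
        __syncthreads();
    }
    if (tid == 0)
        out[blockIdx.x] = sdata[0];                    // one partial sum per block
}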
af7c1667be4a13637ee52e94c6e84629d2dfa0e4.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <thrust/device_vector.h> #include <thrust/scan.h> #include <hip/hip_runtime.h> #include "device_launch_parameters.h" #include <assert.h> #include <chrono> template <typename T, typename C> __global__ void sub(T* output, const C* starter, const C* stopper, int64_t startsoffset, int64_t stopsoffset, int64_t n) { int thid = threadIdx.x + blockIdx.x * blockDim.x; if (thid < n) { C start = starter[thid + startsoffset]; C stop = stopper[thid + stopsoffset]; assert(start <= stop); output[thid] = stop - start; } } template <typename T, typename C> void prefix_sum(T* output, const C* arr, const C* arr2, int64_t startsoffset, int64_t stopsoffset, int64_t length) { int block, thread; if (length > 1024) { block = (length / 1024) + 1; thread = 1024; } else { thread = length; block = 1; } T* d_output; C* d_arr, * d_arr2; hipMalloc((void**)&d_output, length * sizeof(T)); hipMalloc((void**)&d_arr, length * sizeof(C)); hipMemcpy(d_arr, arr, length * sizeof(C), hipMemcpyHostToDevice); hipMalloc((void**)&d_arr2, length * sizeof(C)); hipMemcpy(d_arr2, arr2, length * sizeof(C), hipMemcpyHostToDevice); hipLaunchKernelGGL(( sub<T, C>) , dim3(block), dim3(thread), 0, 0, d_output, d_arr, d_arr2, startsoffset, stopsoffset, length); hipDeviceSynchronize(); thrust::device_vector<T> data(d_output, d_output + length); thrust::device_vector<T> temp(data.size() + 1); thrust::exclusive_scan(data.begin(), data.end(), temp.begin()); temp[data.size()] = data.back() + temp[data.size() - 1]; thrust::copy(temp.begin(), temp.end(), output); hipFree(d_output); hipFree(d_arr); hipFree(d_arr2); } template <typename C, typename T> void foo(T* tooffsets, const C* fromstarts, const C* fromstops, int64_t startsoffset, int64_t stopsoffset, int64_t length) { tooffsets[0] = 0; for (int64_t i = 0; i < length; i++) { C start = fromstarts[startsoffset + i]; C stop = fromstops[stopsoffset + i]; assert(start <= stop); tooffsets[i + 1] = tooffsets[i] + (stop - start); } } template <typename T> bool compare(T* arr1, T* arr2, int n) { for (int i=0; i<n; i++) { if (arr1[i] != arr2[i]) return false; } return true; } int main() { int const size = 60000; int starter[size], stopper[size], output[size + 1], output2[size + 1]; for (int i = 0; i < size; i++) { starter[i] = i; stopper[i] = i + 1; } prefix_sum<int, int>(output, starter, stopper, 0, 0, size); // GPU Warm up hipDeviceSynchronize(); auto start1 = std::chrono::high_resolution_clock::now(); prefix_sum<int, int>(output, starter, stopper, 0, 0, size); hipDeviceSynchronize(); auto stop1 = std::chrono::high_resolution_clock::now(); auto time1 = std::chrono::duration_cast<std::chrono::microseconds>(stop1 - start1); std::cout << "Time taken for GPU = " << time1.count() << "\n"; auto start2 = std::chrono::high_resolution_clock::now(); foo<int, int>(output2, starter, stopper, 0, 0, size); auto stop2 = std::chrono::high_resolution_clock::now(); auto time2 = std::chrono::duration_cast<std::chrono::microseconds>(stop2 - start2); std::cout << "Time taken for CPU = " << time2.count() << "\n"; for (int i=0; i<size; i++) { if (output2[i] != output[i]) { std::cout << "FALSE" << std::endl; return 0; } } return 0; }
af7c1667be4a13637ee52e94c6e84629d2dfa0e4.cu
#include <iostream> #include <thrust/device_vector.h> #include <thrust/scan.h> #include <cuda_runtime.h> #include "device_launch_parameters.h" #include <assert.h> #include <chrono> template <typename T, typename C> __global__ void sub(T* output, const C* starter, const C* stopper, int64_t startsoffset, int64_t stopsoffset, int64_t n) { int thid = threadIdx.x + blockIdx.x * blockDim.x; if (thid < n) { C start = starter[thid + startsoffset]; C stop = stopper[thid + stopsoffset]; assert(start <= stop); output[thid] = stop - start; } } template <typename T, typename C> void prefix_sum(T* output, const C* arr, const C* arr2, int64_t startsoffset, int64_t stopsoffset, int64_t length) { int block, thread; if (length > 1024) { block = (length / 1024) + 1; thread = 1024; } else { thread = length; block = 1; } T* d_output; C* d_arr, * d_arr2; cudaMalloc((void**)&d_output, length * sizeof(T)); cudaMalloc((void**)&d_arr, length * sizeof(C)); cudaMemcpy(d_arr, arr, length * sizeof(C), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_arr2, length * sizeof(C)); cudaMemcpy(d_arr2, arr2, length * sizeof(C), cudaMemcpyHostToDevice); sub<T, C> <<<block, thread>>>(d_output, d_arr, d_arr2, startsoffset, stopsoffset, length); cudaDeviceSynchronize(); thrust::device_vector<T> data(d_output, d_output + length); thrust::device_vector<T> temp(data.size() + 1); thrust::exclusive_scan(data.begin(), data.end(), temp.begin()); temp[data.size()] = data.back() + temp[data.size() - 1]; thrust::copy(temp.begin(), temp.end(), output); cudaFree(d_output); cudaFree(d_arr); cudaFree(d_arr2); } template <typename C, typename T> void foo(T* tooffsets, const C* fromstarts, const C* fromstops, int64_t startsoffset, int64_t stopsoffset, int64_t length) { tooffsets[0] = 0; for (int64_t i = 0; i < length; i++) { C start = fromstarts[startsoffset + i]; C stop = fromstops[stopsoffset + i]; assert(start <= stop); tooffsets[i + 1] = tooffsets[i] + (stop - start); } } template <typename T> bool compare(T* arr1, T* arr2, int n) { for (int i=0; i<n; i++) { if (arr1[i] != arr2[i]) return false; } return true; } int main() { int const size = 60000; int starter[size], stopper[size], output[size + 1], output2[size + 1]; for (int i = 0; i < size; i++) { starter[i] = i; stopper[i] = i + 1; } prefix_sum<int, int>(output, starter, stopper, 0, 0, size); // GPU Warm up cudaDeviceSynchronize(); auto start1 = std::chrono::high_resolution_clock::now(); prefix_sum<int, int>(output, starter, stopper, 0, 0, size); cudaDeviceSynchronize(); auto stop1 = std::chrono::high_resolution_clock::now(); auto time1 = std::chrono::duration_cast<std::chrono::microseconds>(stop1 - start1); std::cout << "Time taken for GPU = " << time1.count() << "\n"; auto start2 = std::chrono::high_resolution_clock::now(); foo<int, int>(output2, starter, stopper, 0, 0, size); auto stop2 = std::chrono::high_resolution_clock::now(); auto time2 = std::chrono::duration_cast<std::chrono::microseconds>(stop2 - start2); std::cout << "Time taken for CPU = " << time2.count() << "\n"; for (int i=0; i<size; i++) { if (output2[i] != output[i]) { std::cout << "FALSE" << std::endl; return 0; } } return 0; }
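// Illustration: the exclusive scan plus manual last element above can also be expressed as a
// single inclusive scan into offsets[1..n] with offsets[0] = 0. This is a sketch for comparison
// only; counts_to_offsets_sketch is a hypothetical helper (it reuses the thrust headers already
// included above) and is not part of the benchmark.
void counts_to_offsets_sketch(const thrust::device_vector<int>& counts,
                              thrust::device_vector<int>& offsets)
{
    offsets.resize(counts.size() + 1);
    offsets[0] = 0;                                        // leading zero
    thrust::inclusive_scan(counts.begin(), counts.end(),
                           offsets.begin() + 1);           // running sums fill offsets[1..n]
}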
37a4a3f1662ee1e6246388f1a33782232c2c5375.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THH/generic/THHTensorMathReduce.hip" #else accreal THCTensor_(sumall)(THCState *state, THCTensor *self) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); accreal val; if (!THC_reduceAll<scalar_t>(state, self, thrust::identity<accreal>{}, ReduceAdd<accreal>{}, scalar_cast<accreal>(0), &val, 0)) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaCheck(hipGetLastError()); return val; } #if !defined(THC_REAL_IS_BOOL) void THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); if (!THC_reduceDim<scalar_t>(state, self, src, thrust::identity<accreal>{}, ReduceMultiply<accreal>{}, thrust::identity<accreal>{}, scalar_cast<accreal>(1), dimension, keepdim)) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCudaCheck(hipGetLastError()); } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) void THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, scalar_t value, int dimension, scalar_t maxnorm) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); THCTensor *self_; THCTensor *src_ = THCTensor_(newTranspose)(state, src, dimension, 0); THCTensor *data = THCTensor_(newClone)(state, src_); int64_t numel = THCTensor_(nElement)(state, data); THArgCheck(dimension >= 0 && dimension < THCTensor_(nDimensionLegacyNoScalars)(state, src), 3, "invalid dimension"); THArgCheck(THCNumerics<scalar_t>::gt(value, scalar_cast<scalar_t>(0)), 2, "non-positive-norm not supported"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) > 1, 1, "need at least 2 dimensions"); if (numel > 0) { ptrdiff_t size = numel / THTensor_sizeLegacyNoScalars(data, 0); dim3 grid( THTensor_sizeLegacyNoScalars(data, 0)); // NOTE: only with this specific number of threads can this work on GPUs with a warp size != 32 (such as AMD). Do not alter w/o changing buffer size in kernel. 
dim3 threads(32); hipLaunchKernelGGL(( THCTensor_kernel_renorm<scalar_t, accreal>) , dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state), THCTensor_(data)(state, data), scalar_cast<accreal>(value), size, scalar_cast<accreal>(maxnorm)); hipError_t errcode = hipGetLastError(); if(errcode != hipSuccess) THError(hipGetErrorString(errcode)); } THCTensor_(free)(state, src_); self_ = THCTensor_(newTranspose)(state, data, dimension, 0); THCTensor_(resizeAs)(state, self, self_); THCTensor_(freeCopyTo)(state, self_, self); THCTensor_(free)(state, data); } void THCTensor_(std)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); WelfordData<accreal, scalar_t> init; init.reset(); if (!THC_reduceDim<scalar_t>(state, self_, src, ModifyWelford<WelfordData<accreal, scalar_t>>{}, ReduceWelford<accreal, scalar_t>{}, VarianceWelford<accreal, scalar_t>{biased, true}, init, dimension, keepdim)) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCudaCheck(hipGetLastError()); } void THCTensor_(var)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); WelfordData<accreal, scalar_t> init; init.reset(); if (!THC_reduceDim<scalar_t>(state, self_, src, ModifyWelford<WelfordData<accreal, scalar_t>>{}, ReduceWelford<accreal, scalar_t>{}, VarianceWelford<accreal, scalar_t>{biased, false}, init, dimension, keepdim)) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCudaCheck(hipGetLastError()); } accreal THCTensor_(stdall)(THCState *state, THCTensor *self, int biased) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); return THCNumerics<accreal>::sqrt((THCTensor_(varall)(state, self, biased))); } accreal THCTensor_(varall)(THCState *state, THCTensor *self, int biased) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); accreal mean = THCTensor_(meanall)(state, self); accreal val; if (!THC_reduceAll<scalar_t>(state, self, SquareFunctor<accreal>(mean), ReduceAdd<accreal>(), scalar_cast<accreal>(0), &val, 0)) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } val = THCNumerics<accreal>::div( val, scalar_cast<accreal>(std::max<int64_t>(0, THCTensor_(nElement)(state, self) - (biased ? 
0 : 1))) ); THCudaCheck(hipGetLastError()); return val; } void THCTensor_(norm)(THCState *state, THCTensor* self, THCTensor* src, scalar_t _value, int dimension, int keepdim) { const accreal value = scalar_cast<accreal>(_value); THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) { THC_reduceDim<scalar_t>(state, self, src, TensorNonZeroOp<accreal>{}, ReduceAdd<accreal>{}, thrust::identity<accreal>{}, scalar_cast<accreal>(0), dimension, keepdim); } else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(1))) { THC_reduceDim<scalar_t>(state, self, src, TensorNormOp<accreal, 1>{value}, ReduceAdd<accreal>{}, thrust::identity<accreal>{}, scalar_cast<accreal>(0), dimension, keepdim); } else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(2))) { THC_reduceDim<scalar_t>(state, self, src, TensorNormOp<accreal, 2>{value}, ReduceAdd<accreal>{}, ReducePow<accreal>{scalar_cast<accreal>(.5)}, scalar_cast<accreal>(0), dimension, keepdim); } else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) { THC_reduceDim<scalar_t>(state, self, src, TensorNormOp<accreal, 1>{value}, ReduceMax<accreal>{}, thrust::identity<accreal>{}, scalar_cast<accreal>(0), dimension, keepdim); } else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(-INFINITY))) { THC_reduceDim<scalar_t>(state, self, src, TensorNormOp<accreal, 1>{value}, ReduceMin<accreal>{}, thrust::identity<accreal>{}, scalar_cast<accreal>(INFINITY), dimension, keepdim); } else { THC_reduceDim<scalar_t>(state, self, src, TensorNormOp<accreal, -1>{value}, ReduceAdd<accreal>{}, ReducePow<accreal>{THCNumerics<accreal>::cinv(value)}, scalar_cast<accreal>(0), dimension, keepdim); } THCudaCheck(hipGetLastError()); } accreal THCTensor_(normall)(THCState *state, THCTensor *self, scalar_t _value) { const accreal value = scalar_cast<accreal>(_value); THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); accreal result; if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) { THC_reduceAll<scalar_t>(state, self, TensorNonZeroOp<accreal>{}, ReduceAdd<accreal>{}, scalar_cast<accreal>(0), &result, 0); } else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(1))) { THC_reduceAll<scalar_t>(state, self, TensorNormOp<accreal, 1>{value}, ReduceAdd<accreal>{}, scalar_cast<accreal>(0), &result, 0); } else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(2))) { THC_reduceAll<scalar_t>(state, self, TensorNormOp<accreal, 2>{value}, ReduceAdd<accreal>{}, scalar_cast<accreal>(0), &result, 0); result = THCNumerics<accreal>::sqrt(result); } else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) { THC_reduceAll<scalar_t>(state, self, TensorNormOp<accreal, 1>{value}, ReduceMax<accreal>{}, scalar_cast<accreal>(0), &result, 0); } else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(-INFINITY))) { THC_reduceAll<scalar_t>(state, self, TensorNormOp<accreal, 1>{value}, ReduceMin<accreal>{}, scalar_cast<accreal>(INFINITY), &result, 0); } else { THC_reduceAll<scalar_t>(state, self, TensorNormOp<accreal, -1>{value}, ReduceAdd<accreal>{}, scalar_cast<accreal>(0), &result, 0); result = THCNumerics<accreal>::pow(result, THCNumerics<accreal>::cinv(value)); } THCudaCheck(hipGetLastError()); return result; } accreal THCTensor_(dist)(THCState *state, THCTensor *self, THCTensor *src, scalar_t _value) { const accreal value = scalar_cast<accreal>(_value); THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); self = 
THCTensor_(newContiguous)(state, self); ptrdiff_t size = THCTensor_(nElement)(state, self); src = THCTensor_(newContiguous)(state, src); thrust::device_ptr<scalar_t> self_data(THCTensor_(data)(state, self)); thrust::device_ptr<scalar_t> src_data(THCTensor_(data)(state, src)); THCThrustAllocator thrustAlloc(state); accreal result; if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) { result = thrust::inner_product( #if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__ thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif self_data, self_data+size, src_data, scalar_cast<accreal>(0), ReduceMax<accreal>(), ThrustTensorDistOp<scalar_t, accreal>(scalar_cast<scalar_t>(1))); } else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(-INFINITY))) { result = thrust::inner_product( #if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__ thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif self_data, self_data+size, src_data, scalar_cast<accreal>(INFINITY), ReduceMin<accreal>(), ThrustTensorDistOp<scalar_t, accreal>(scalar_cast<scalar_t>(1))); } else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) { result = thrust::inner_product( #if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__ thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif self_data, self_data+size, src_data, scalar_cast<accreal>(0), thrust::plus<accreal>(), ThrustTensorDistOp<scalar_t, accreal>(scalar_cast<scalar_t>(0))); } else { result = thrust::inner_product( #if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__ thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif self_data, self_data+size, src_data, scalar_cast<accreal>(0), thrust::plus<accreal>(), ThrustTensorDistOp<scalar_t, accreal>(value)); result = THCNumerics<accreal>::pow(result, THCNumerics<accreal>::cinv(value)); } THCTensor_(free)(state, src); THCTensor_(free)(state, self); return result; } #endif accreal THCTensor_(meanall)(THCState *state, THCTensor *self) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); return THCTensor_(sumall)(state, self)/THCTensor_(nElement)(state, self); } scalar_t THCTensor_(minall)(THCState *state, THCTensor *self) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); accreal val; if (!THC_reduceAll<scalar_t>(state, self, thrust::identity<accreal>{}, ReduceMin<accreal>{}, THCNumerics<accreal>::upper_bound(), &val, 0)) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaCheck(hipGetLastError()); return scalar_cast<scalar_t>(val); } scalar_t THCTensor_(maxall)(THCState *state, THCTensor *self) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); accreal val; if (!THC_reduceAll<scalar_t>(state, self, thrust::identity<accreal>{}, ReduceMax<accreal>{}, THCNumerics<accreal>::lower_bound(), &val, 0)) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaCheck(hipGetLastError()); return scalar_cast<scalar_t>(val); } void THCTensor_(max)(THCState *state, THCTensor *values, THCudaLongTensor *indices, THCTensor *src, int dimension, int keepdim) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src)); thrust::pair<scalar_t, int64_t> init = thrust::make_pair<scalar_t, int64_t>( THCNumerics<scalar_t>::lower_bound(), 0); return THC_reduceDimIndex<scalar_t, int64_t>( state, values, indices, src, dimension, keepdim, init, MaxValuePair<scalar_t, int64_t>()); } void THCTensor_(min)(THCState *state, THCTensor *values, THCudaLongTensor *indices, THCTensor *src, int dimension, int 
keepdim) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src)); thrust::pair<scalar_t, int64_t> init = thrust::make_pair<scalar_t, int64_t>( THCNumerics<scalar_t>::upper_bound(), 0); return THC_reduceDimIndex<scalar_t, int64_t>( state, values, indices, src, dimension, keepdim, init, MinValuePair<scalar_t, int64_t>()); } #endif #endif
37a4a3f1662ee1e6246388f1a33782232c2c5375.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THC/generic/THCTensorMathReduce.cu" #else accreal THCTensor_(sumall)(THCState *state, THCTensor *self) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); accreal val; if (!THC_reduceAll<scalar_t>(state, self, thrust::identity<accreal>{}, ReduceAdd<accreal>{}, scalar_cast<accreal>(0), &val, 0)) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaCheck(cudaGetLastError()); return val; } #if !defined(THC_REAL_IS_BOOL) void THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); if (!THC_reduceDim<scalar_t>(state, self, src, thrust::identity<accreal>{}, ReduceMultiply<accreal>{}, thrust::identity<accreal>{}, scalar_cast<accreal>(1), dimension, keepdim)) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCudaCheck(cudaGetLastError()); } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) void THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, scalar_t value, int dimension, scalar_t maxnorm) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); THCTensor *self_; THCTensor *src_ = THCTensor_(newTranspose)(state, src, dimension, 0); THCTensor *data = THCTensor_(newClone)(state, src_); int64_t numel = THCTensor_(nElement)(state, data); THArgCheck(dimension >= 0 && dimension < THCTensor_(nDimensionLegacyNoScalars)(state, src), 3, "invalid dimension"); THArgCheck(THCNumerics<scalar_t>::gt(value, scalar_cast<scalar_t>(0)), 2, "non-positive-norm not supported"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) > 1, 1, "need at least 2 dimensions"); if (numel > 0) { ptrdiff_t size = numel / THTensor_sizeLegacyNoScalars(data, 0); dim3 grid( THTensor_sizeLegacyNoScalars(data, 0)); // NOTE: only with this specific number of threads can this work on GPUs with a warp size != 32 (such as AMD). Do not alter w/o changing buffer size in kernel. 
dim3 threads(32); THCTensor_kernel_renorm<scalar_t, accreal> <<<grid, threads, 0, THCState_getCurrentStream(state)>>> (THCTensor_(data)(state, data), scalar_cast<accreal>(value), size, scalar_cast<accreal>(maxnorm)); cudaError_t errcode = cudaGetLastError(); if(errcode != cudaSuccess) THError(cudaGetErrorString(errcode)); } THCTensor_(free)(state, src_); self_ = THCTensor_(newTranspose)(state, data, dimension, 0); THCTensor_(resizeAs)(state, self, self_); THCTensor_(freeCopyTo)(state, self_, self); THCTensor_(free)(state, data); } void THCTensor_(std)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); WelfordData<accreal, scalar_t> init; init.reset(); if (!THC_reduceDim<scalar_t>(state, self_, src, ModifyWelford<WelfordData<accreal, scalar_t>>{}, ReduceWelford<accreal, scalar_t>{}, VarianceWelford<accreal, scalar_t>{biased, true}, init, dimension, keepdim)) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCudaCheck(cudaGetLastError()); } void THCTensor_(var)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); WelfordData<accreal, scalar_t> init; init.reset(); if (!THC_reduceDim<scalar_t>(state, self_, src, ModifyWelford<WelfordData<accreal, scalar_t>>{}, ReduceWelford<accreal, scalar_t>{}, VarianceWelford<accreal, scalar_t>{biased, false}, init, dimension, keepdim)) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCudaCheck(cudaGetLastError()); } accreal THCTensor_(stdall)(THCState *state, THCTensor *self, int biased) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); return THCNumerics<accreal>::sqrt((THCTensor_(varall)(state, self, biased))); } accreal THCTensor_(varall)(THCState *state, THCTensor *self, int biased) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); accreal mean = THCTensor_(meanall)(state, self); accreal val; if (!THC_reduceAll<scalar_t>(state, self, SquareFunctor<accreal>(mean), ReduceAdd<accreal>(), scalar_cast<accreal>(0), &val, 0)) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } val = THCNumerics<accreal>::div( val, scalar_cast<accreal>(std::max<int64_t>(0, THCTensor_(nElement)(state, self) - (biased ? 
0 : 1))) ); THCudaCheck(cudaGetLastError()); return val; } void THCTensor_(norm)(THCState *state, THCTensor* self, THCTensor* src, scalar_t _value, int dimension, int keepdim) { const accreal value = scalar_cast<accreal>(_value); THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) { THC_reduceDim<scalar_t>(state, self, src, TensorNonZeroOp<accreal>{}, ReduceAdd<accreal>{}, thrust::identity<accreal>{}, scalar_cast<accreal>(0), dimension, keepdim); } else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(1))) { THC_reduceDim<scalar_t>(state, self, src, TensorNormOp<accreal, 1>{value}, ReduceAdd<accreal>{}, thrust::identity<accreal>{}, scalar_cast<accreal>(0), dimension, keepdim); } else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(2))) { THC_reduceDim<scalar_t>(state, self, src, TensorNormOp<accreal, 2>{value}, ReduceAdd<accreal>{}, ReducePow<accreal>{scalar_cast<accreal>(.5)}, scalar_cast<accreal>(0), dimension, keepdim); } else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) { THC_reduceDim<scalar_t>(state, self, src, TensorNormOp<accreal, 1>{value}, ReduceMax<accreal>{}, thrust::identity<accreal>{}, scalar_cast<accreal>(0), dimension, keepdim); } else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(-INFINITY))) { THC_reduceDim<scalar_t>(state, self, src, TensorNormOp<accreal, 1>{value}, ReduceMin<accreal>{}, thrust::identity<accreal>{}, scalar_cast<accreal>(INFINITY), dimension, keepdim); } else { THC_reduceDim<scalar_t>(state, self, src, TensorNormOp<accreal, -1>{value}, ReduceAdd<accreal>{}, ReducePow<accreal>{THCNumerics<accreal>::cinv(value)}, scalar_cast<accreal>(0), dimension, keepdim); } THCudaCheck(cudaGetLastError()); } accreal THCTensor_(normall)(THCState *state, THCTensor *self, scalar_t _value) { const accreal value = scalar_cast<accreal>(_value); THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); accreal result; if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) { THC_reduceAll<scalar_t>(state, self, TensorNonZeroOp<accreal>{}, ReduceAdd<accreal>{}, scalar_cast<accreal>(0), &result, 0); } else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(1))) { THC_reduceAll<scalar_t>(state, self, TensorNormOp<accreal, 1>{value}, ReduceAdd<accreal>{}, scalar_cast<accreal>(0), &result, 0); } else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(2))) { THC_reduceAll<scalar_t>(state, self, TensorNormOp<accreal, 2>{value}, ReduceAdd<accreal>{}, scalar_cast<accreal>(0), &result, 0); result = THCNumerics<accreal>::sqrt(result); } else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) { THC_reduceAll<scalar_t>(state, self, TensorNormOp<accreal, 1>{value}, ReduceMax<accreal>{}, scalar_cast<accreal>(0), &result, 0); } else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(-INFINITY))) { THC_reduceAll<scalar_t>(state, self, TensorNormOp<accreal, 1>{value}, ReduceMin<accreal>{}, scalar_cast<accreal>(INFINITY), &result, 0); } else { THC_reduceAll<scalar_t>(state, self, TensorNormOp<accreal, -1>{value}, ReduceAdd<accreal>{}, scalar_cast<accreal>(0), &result, 0); result = THCNumerics<accreal>::pow(result, THCNumerics<accreal>::cinv(value)); } THCudaCheck(cudaGetLastError()); return result; } accreal THCTensor_(dist)(THCState *state, THCTensor *self, THCTensor *src, scalar_t _value) { const accreal value = scalar_cast<accreal>(_value); THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); self = 
THCTensor_(newContiguous)(state, self); ptrdiff_t size = THCTensor_(nElement)(state, self); src = THCTensor_(newContiguous)(state, src); thrust::device_ptr<scalar_t> self_data(THCTensor_(data)(state, self)); thrust::device_ptr<scalar_t> src_data(THCTensor_(data)(state, src)); THCThrustAllocator thrustAlloc(state); accreal result; if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) { result = thrust::inner_product( #if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__ thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif self_data, self_data+size, src_data, scalar_cast<accreal>(0), ReduceMax<accreal>(), ThrustTensorDistOp<scalar_t, accreal>(scalar_cast<scalar_t>(1))); } else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(-INFINITY))) { result = thrust::inner_product( #if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__ thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif self_data, self_data+size, src_data, scalar_cast<accreal>(INFINITY), ReduceMin<accreal>(), ThrustTensorDistOp<scalar_t, accreal>(scalar_cast<scalar_t>(1))); } else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) { result = thrust::inner_product( #if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__ thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif self_data, self_data+size, src_data, scalar_cast<accreal>(0), thrust::plus<accreal>(), ThrustTensorDistOp<scalar_t, accreal>(scalar_cast<scalar_t>(0))); } else { result = thrust::inner_product( #if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__ thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif self_data, self_data+size, src_data, scalar_cast<accreal>(0), thrust::plus<accreal>(), ThrustTensorDistOp<scalar_t, accreal>(value)); result = THCNumerics<accreal>::pow(result, THCNumerics<accreal>::cinv(value)); } THCTensor_(free)(state, src); THCTensor_(free)(state, self); return result; } #endif accreal THCTensor_(meanall)(THCState *state, THCTensor *self) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); return THCTensor_(sumall)(state, self)/THCTensor_(nElement)(state, self); } scalar_t THCTensor_(minall)(THCState *state, THCTensor *self) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); accreal val; if (!THC_reduceAll<scalar_t>(state, self, thrust::identity<accreal>{}, ReduceMin<accreal>{}, THCNumerics<accreal>::upper_bound(), &val, 0)) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaCheck(cudaGetLastError()); return scalar_cast<scalar_t>(val); } scalar_t THCTensor_(maxall)(THCState *state, THCTensor *self) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); accreal val; if (!THC_reduceAll<scalar_t>(state, self, thrust::identity<accreal>{}, ReduceMax<accreal>{}, THCNumerics<accreal>::lower_bound(), &val, 0)) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaCheck(cudaGetLastError()); return scalar_cast<scalar_t>(val); } void THCTensor_(max)(THCState *state, THCTensor *values, THCudaLongTensor *indices, THCTensor *src, int dimension, int keepdim) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src)); thrust::pair<scalar_t, int64_t> init = thrust::make_pair<scalar_t, int64_t>( THCNumerics<scalar_t>::lower_bound(), 0); return THC_reduceDimIndex<scalar_t, int64_t>( state, values, indices, src, dimension, keepdim, init, MaxValuePair<scalar_t, int64_t>()); } void THCTensor_(min)(THCState *state, THCTensor *values, THCudaLongTensor *indices, THCTensor *src, int dimension, int keepdim) { 
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src)); thrust::pair<scalar_t, int64_t> init = thrust::make_pair<scalar_t, int64_t>( THCNumerics<scalar_t>::upper_bound(), 0); return THC_reduceDimIndex<scalar_t, int64_t>( state, values, indices, src, dimension, keepdim, init, MinValuePair<scalar_t, int64_t>()); } #endif #endif
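// Illustration: THCTensor_(var) and THCTensor_(std) above reduce with Welford functors. For
// reference, this is a minimal host-side sketch of the same streaming mean/variance update
// (hypothetical struct; the device version additionally merges per-thread partial results).
struct WelfordSketch {
    long long n = 0;
    double mean = 0.0, m2 = 0.0;
    void add(double x) {                      // one-pass update per element
        ++n;
        double delta = x - mean;
        mean += delta / n;
        m2 += delta * (x - mean);             // uses the already-updated mean
    }
    double var(bool biased) const {           // biased: divide by n, otherwise by n-1
        return m2 / double(biased ? n : n - 1);
    }
};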
6e10fd64ca1d2dea6591c7f762e0a59a6751853a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * Copyright (c) 2019 Konduit K.K. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma ([email protected]), created on 20.04.2018 // #include<ops/declarable/helpers/transforms.h> #include <array/ResultSet.h> #include <helpers/ShapeUtils.h> #include <numeric> #include <array/NDArrayFactory.h> #include <helpers/TAD.h> #include <exceptions/cuda_exception.h> #include <helpers/PointersManager.h> #include <helpers/ConstantTadHelper.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void invertPermutationCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo) { const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ Nd4jLong len, totalThreads; if (threadIdx.x == 0) { len = shape::length(xShapeInfo); totalThreads = gridDim.x * blockDim.x; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < len; i += totalThreads) { const auto xOffset = shape::getIndexOffset(i, xShapeInfo); const Nd4jLong index = x[xOffset]; const auto zOffset = shape::getIndexOffset(index, zShapeInfo); z[zOffset] = i; } } /////////////////////////////////////////////////////////////////// template<typename T> __host__ static void invertPermutationCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo) { hipLaunchKernelGGL(( invertPermutationCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), 1024, *stream, vx, xShapeInfo, vz, zShapeInfo); } //////////////////////////////////////////////////////////////////////// void invertPermutation(sd::LaunchContext* context, const NDArray& input, NDArray& output) { const int threadsPerBlock = MAX_NUM_THREADS; const int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; PointersManager manager(context, "invertPermutation"); NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), invertPermutationCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo()), LIBND4J_TYPES); NDArray::registerSpecialUse({&output}, {&input}); manager.synchronize(); } ////////////////////////////////////////////////////////////////////////// template<typename T> __global__ static void traceCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint diagLen) { const auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); __shared__ T* sharedMem; __shared__ int 
xRank, zRank, *coordsMem; // xRank = zRank + 2 __shared__ Nd4jLong xLen, zLen; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<T*>(shmem); coordsMem = reinterpret_cast<int*>(shmem + blockDim.x * sizeof(T)); xRank = shape::rank(xShapeInfo); zRank = shape::rank(zShapeInfo); xLen = shape::length(xShapeInfo); zLen = shape::length(zShapeInfo); // corresponds to number of matrices } __syncthreads(); auto coords = coordsMem + threadIdx.x * xRank; for (uint m = blockIdx.x; m < zLen; m += gridDim.x) { // one block per each element of z, that is per each matrix shape::index2coords(m, zShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); sharedMem[threadIdx.x] = 0; for (uint i = threadIdx.x; i < diagLen; i += blockDim.x) { coords[zRank] = coords[zRank + 1] = i; const auto xOffset = shape::getOffset(xShapeInfo, coords); sharedMem[threadIdx.x] += x[xOffset]; } __syncthreads(); // aggregate sum for (Nd4jLong activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) { if (threadIdx.x < activeThreads) sharedMem[threadIdx.x] += sharedMem[threadIdx.x + activeThreads]; __syncthreads(); } if (threadIdx.x == 0) z[zOffset] = *sharedMem; __syncthreads(); } } /////////////////////////////////////////////////////////////////// template<typename T> static void traceCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const uint diagLen) { hipLaunchKernelGGL(( traceCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, diagLen); } /////////////////////////////////////////////////////////////////// void trace(sd::LaunchContext* context, const NDArray& input, NDArray& output) { PointersManager manager(context, "trace"); const uint diagLen = input.sizeAt(-1) < input.sizeAt(-2) ? 
input.sizeAt(-1) : input.sizeAt(-2); const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = threadsPerBlock * (sizeof(int) * input.rankOf() + input.sizeOfT()) + 128; NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), traceCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), diagLen), LIBND4J_TYPES); NDArray::registerSpecialUse({&output}, {&input}); manager.synchronize(); } /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void triuBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int diag) { // x and z have same shapes const auto x = reinterpret_cast<const T*>(vx); // gradO auto z = reinterpret_cast<T*>(vz); // gradI __shared__ int rank, areSameOffsets, *sharedMem; // xRank = zRank __shared__ Nd4jLong len, totalThreads; // xLen = zLen if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<int*>(shmem); areSameOffsets = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo); rank = shape::rank(xShapeInfo); len = shape::length(zShapeInfo); totalThreads = gridDim.x * blockDim.x; } __syncthreads(); auto coords = sharedMem + threadIdx.x * rank; const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < len; i += totalThreads) { shape::index2coords(i, zShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); if((coords[rank - 2] + diag > coords[rank - 1])) // row + diag > col z[zOffset] = 0; else z[zOffset] = x[areSameOffsets ? 
zOffset : shape::getOffset(xShapeInfo, coords)]; } } /////////////////////////////////////////////////////////////////// template<typename T> static void triuBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int diag) { hipLaunchKernelGGL(( triuBPCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, diag); } /////////////////////////////////////////////////////////////////// void triuBP(sd::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int diagonal) { const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (gradO.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = threadsPerBlock * sizeof(int) * gradO.rankOf() + 128; PointersManager manager(context, "triuBP"); NDArray::prepareSpecialUse({&gradI}, {&gradO}); BUILD_SINGLE_SELECTOR(gradI.dataType(), triuBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), gradO.specialBuffer(), gradO.specialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), diagonal), LIBND4J_TYPES); NDArray::registerSpecialUse({&gradI}, {&gradO}); manager.synchronize(); } /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void tileBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, Nd4jLong* globMem) { // x and z have same shapes const auto x = reinterpret_cast<const T*>(vx); // gradO auto z = reinterpret_cast<T*>(vz); // gradI __shared__ int xRank, zRank, *sharedMem; // xRank >= zRank __shared__ Nd4jLong numOfXOffsets, zLen, totalThreads; // xLen >= zLen if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<int*>(shmem); xRank = shape::rank(zShapeInfo); zLen = shape::length(zShapeInfo); numOfXOffsets = shape::length(xShapeInfo) / zLen; totalThreads = gridDim.x * blockDim.x; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; auto memBuff = sharedMem + threadIdx.x * 2 * xRank; auto xOffsets = globMem + tid * numOfXOffsets; for (Nd4jLong i = tid; i < zLen; i += totalThreads) { const auto zOffset = shape::getIndexOffset(i, zShapeInfo); shape::outerArrayOffsets(xOffsets, i, xShapeInfo, zShapeInfo, memBuff); z[zOffset] = x[xOffsets[0]]; // first offset for (Nd4jLong j = 1; j < numOfXOffsets; ++j) // rest offsets z[zOffset] += x[xOffsets[j]]; } } /////////////////////////////////////////////////////////////////// template<typename T> static void tileBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, Nd4jLong* globMem) { hipLaunchKernelGGL(( tileBPCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, globMem); } ////////////////////////////////////////////////////////////////////////// void tileBP(sd::LaunchContext * context, const NDArray& gradO /*input*/, NDArray& gradI /*output*/, const std::vector<Nd4jLong> reps) { NDArray memBuff('c', gradO.getShapeAsVector(), sd::DataType::INT64, context); // empty auxiliary array for storing device memory which will be used in kernel calculations const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (gradI.lengthOf() + threadsPerBlock - 1) / 
threadsPerBlock; const int sharedMem = threadsPerBlock * sizeof(int) * 2 * gradO.rankOf() + 128; PointersManager manager(context, "tileBP"); NDArray::prepareSpecialUse({&gradI}, {&gradO, &memBuff}); BUILD_SINGLE_SELECTOR(gradI.dataType(), tileBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), gradO.specialBuffer(), gradO.specialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), reinterpret_cast<Nd4jLong*>(memBuff.specialBuffer())), FLOAT_TYPES); NDArray::registerSpecialUse({&gradI}, {&gradO, &memBuff}); manager.synchronize(); } template <typename T> static __global__ void swapShuffleKernel(T* input, Nd4jLong const* shape, Nd4jLong firstDim, sd::graph::RandomGenerator* rng) { auto tid = blockIdx.x * blockDim.x; auto step = blockDim.x * gridDim.x; for (int i = firstDim - 1 - tid - threadIdx.x; i > 0; i -= step) { int r = rng->relativeInt(i) % i; if (i != r) { const auto iOffset = shape::getIndexOffset(i, shape); const auto rOffset = shape::getIndexOffset(r, shape); T e0 = input[iOffset]; T e1 = input[rOffset]; //math::nd4j_swap<T>(input(i), input(r)); input[iOffset] = e1; input[rOffset] = e0; } } } template <typename T> static __global__ void fillShuffleKernel(T* input, Nd4jLong const* inputShape, T* output, Nd4jLong const* outputShape, Nd4jLong firstDim, int* indices, sd::graph::RandomGenerator* rng) { // PRAGMA_OMP_PARALLEL_FOR_IF((firstDim-1) > Environment::getInstance()->tadThreshold()) auto tid = blockIdx.x * blockDim.x; auto step = blockDim.x * gridDim.x; for(int i = firstDim - 1 - tid - threadIdx.x; i > 0; i -= step) { int r = rng->relativeInt(i) % i; output[shape::getIndexOffset(i, outputShape)] = input[shape::getIndexOffset(indices[r], inputShape)]; if(i != r) { output[shape::getIndexOffset(r, outputShape)] = input[shape::getIndexOffset(indices[i], inputShape)]; // output.p(r, input.e<T>(indices[i])); // math::nd4j_swap<int>(indices[i], indices[r]); atomicExch(&indices[i], indices[r]); } } } ////////////////////////////////////////////////////////////////////////// template <typename T> void randomShuffle_(sd::LaunchContext * context, NDArray& input, NDArray& output, sd::graph::RandomGenerator& rng, const bool isInplace) { // check edge cases first int temp; const int firstDim = input.sizeAt(0); auto stream = context->getCudaStream(); NDArray::prepareSpecialUse({&output}, {&input}); if(input.lengthOf() == 1 || firstDim == 1) { if(!isInplace) output.assign(input); } else if (input.isVector() || shape::isLikeVector(input.shapeInfo(), temp)) { // apply Fisher-Yates shuffle sd::graph::RandomGenerator* dRandom = nullptr; hipMalloc(&dRandom, sizeof(sd::graph::RandomGenerator)); hipMemcpy(dRandom, &rng, sizeof(sd::graph::RandomGenerator), hipMemcpyHostToDevice); T* inputBuf = reinterpret_cast<T*>(input.specialBuffer()); if(isInplace) { hipLaunchKernelGGL(( swapShuffleKernel<T>), dim3(128), dim3(256), 1024, *stream, inputBuf, input.specialShapeInfo(), firstDim, dRandom); } else { std::vector<int> indices(firstDim); std::iota(indices.begin(), indices.end(), 0); hipMemcpy(output.specialBuffer(), input.specialBuffer(), sizeof(T), hipMemcpyDeviceToDevice); //output.p<T>(Nd4jLong(0), input.e<T>(0)); PointersManager pointersManager(context, "helper::randomShuffle_"); int* indicesDev = reinterpret_cast<int*>(pointersManager.replicatePointer(indices.data(), indices.size() * sizeof(int))); T* outputBuf = reinterpret_cast<T*>(output.specialBuffer()); hipLaunchKernelGGL(( fillShuffleKernel<T>), dim3(128), dim3(256), 1024, *stream, inputBuf, 
input.specialShapeInfo(), outputBuf, output.specialShapeInfo(), firstDim, indicesDev, dRandom); pointersManager.synchronize(); } // rng.rewindH(firstDim - 1); hipFree(dRandom); } else { // evaluate sub-arrays list of input array through all dimensions excluding first one std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input.rankOf(), {0}); auto subArrsListIn = input.allTensorsAlongDimension(dimensions); // apply Fisher-Yates shuffle if(isInplace) { for(int i = firstDim - 1; i > 0; --i) { int r = rng.relativeInt(i) % i; if(i != r) subArrsListIn.at(i)->swapUnsafe(*subArrsListIn.at(r)); } } else { // evaluate sub-arrays list of output array through all dimensions excluding first one auto subArrsListOut = output.allTensorsAlongDimension(dimensions); std::vector<int> indices(firstDim); std::iota(indices.begin(), indices.end(), 0); bool isZeroShuffled = false; for(int i = firstDim - 1; i > 0; --i) { int r = rng.relativeInt(i) % i; subArrsListOut.at(i)->assign(subArrsListIn.at(indices[r])); if(r == 0) isZeroShuffled = true; if(i != r) { subArrsListOut.at(r)->assign(subArrsListIn.at(indices[i])); math::nd4j_swap<int>(indices[i], indices[r]); } } if(!isZeroShuffled) subArrsListOut.at(0)->assign(subArrsListIn.at(0)); } rng.rewindH(firstDim-1); } NDArray::registerSpecialUse({&output}, {&input}); } void randomShuffle(sd::LaunchContext * context, NDArray& input, NDArray& output, sd::graph::RandomGenerator& rng, const bool isInplace) { BUILD_SINGLE_SELECTOR(input.dataType(), randomShuffle_, (context, input, output, rng, isInplace), LIBND4J_TYPES); } BUILD_SINGLE_TEMPLATE(template void randomShuffle_, (sd::LaunchContext * context, NDArray& input, NDArray& output, sd::graph::RandomGenerator& rng, const bool isInplace), LIBND4J_TYPES); ////////////////////////////////////////////////////////////////////////// void eye(sd::LaunchContext * context, NDArray& output) { output.setIdentity(); } } } }
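// Illustration: randomShuffle_ above parallelises a Fisher-Yates pass. For reference, this is a
// minimal host-side sketch of the classic in-place form (hypothetical helper; std::rand() stands
// in for sd::graph::RandomGenerator, and the kernels above pick the partner as
// rng->relativeInt(i) % i rather than % (i + 1)).
#include <cstdlib>
#include <utility>

template <typename T>
void fisher_yates_sketch(T* a, int n)
{
    for (int i = n - 1; i > 0; --i) {
        int r = std::rand() % (i + 1);        // partner index in [0, i]
        std::swap(a[i], a[r]);                // swap current element with the partner
    }
}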
6e10fd64ca1d2dea6591c7f762e0a59a6751853a.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * Copyright (c) 2019 Konduit K.K. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma ([email protected]), created on 20.04.2018 // #include<ops/declarable/helpers/transforms.h> #include <array/ResultSet.h> #include <helpers/ShapeUtils.h> #include <numeric> #include <array/NDArrayFactory.h> #include <helpers/TAD.h> #include <exceptions/cuda_exception.h> #include <helpers/PointersManager.h> #include <helpers/ConstantTadHelper.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void invertPermutationCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo) { const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ Nd4jLong len, totalThreads; if (threadIdx.x == 0) { len = shape::length(xShapeInfo); totalThreads = gridDim.x * blockDim.x; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < len; i += totalThreads) { const auto xOffset = shape::getIndexOffset(i, xShapeInfo); const Nd4jLong index = x[xOffset]; const auto zOffset = shape::getIndexOffset(index, zShapeInfo); z[zOffset] = i; } } /////////////////////////////////////////////////////////////////// template<typename T> __host__ static void invertPermutationCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo) { invertPermutationCuda<T><<<blocksPerGrid, threadsPerBlock, 1024, *stream>>>(vx, xShapeInfo, vz, zShapeInfo); } //////////////////////////////////////////////////////////////////////// void invertPermutation(sd::LaunchContext* context, const NDArray& input, NDArray& output) { const int threadsPerBlock = MAX_NUM_THREADS; const int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; PointersManager manager(context, "invertPermutation"); NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), invertPermutationCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo()), LIBND4J_TYPES); NDArray::registerSpecialUse({&output}, {&input}); manager.synchronize(); } ////////////////////////////////////////////////////////////////////////// template<typename T> __global__ static void traceCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint diagLen) { const auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); __shared__ T* sharedMem; __shared__ int xRank, zRank, *coordsMem; // xRank = zRank + 2 __shared__ Nd4jLong xLen, zLen; if (threadIdx.x == 0) { extern __shared__ 
unsigned char shmem[]; sharedMem = reinterpret_cast<T*>(shmem); coordsMem = reinterpret_cast<int*>(shmem + blockDim.x * sizeof(T)); xRank = shape::rank(xShapeInfo); zRank = shape::rank(zShapeInfo); xLen = shape::length(xShapeInfo); zLen = shape::length(zShapeInfo); // corresponds to number of matrices } __syncthreads(); auto coords = coordsMem + threadIdx.x * xRank; for (uint m = blockIdx.x; m < zLen; m += gridDim.x) { // one block per each element of z, that is per each matrix shape::index2coords(m, zShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); sharedMem[threadIdx.x] = 0; for (uint i = threadIdx.x; i < diagLen; i += blockDim.x) { coords[zRank] = coords[zRank + 1] = i; const auto xOffset = shape::getOffset(xShapeInfo, coords); sharedMem[threadIdx.x] += x[xOffset]; } __syncthreads(); // aggregate sum for (Nd4jLong activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) { if (threadIdx.x < activeThreads) sharedMem[threadIdx.x] += sharedMem[threadIdx.x + activeThreads]; __syncthreads(); } if (threadIdx.x == 0) z[zOffset] = *sharedMem; __syncthreads(); } } /////////////////////////////////////////////////////////////////// template<typename T> static void traceCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const uint diagLen) { traceCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, diagLen); } /////////////////////////////////////////////////////////////////// void trace(sd::LaunchContext* context, const NDArray& input, NDArray& output) { PointersManager manager(context, "trace"); const uint diagLen = input.sizeAt(-1) < input.sizeAt(-2) ? 
input.sizeAt(-1) : input.sizeAt(-2); const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = threadsPerBlock * (sizeof(int) * input.rankOf() + input.sizeOfT()) + 128; NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), traceCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), diagLen), LIBND4J_TYPES); NDArray::registerSpecialUse({&output}, {&input}); manager.synchronize(); } /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void triuBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int diag) { // x and z have same shapes const auto x = reinterpret_cast<const T*>(vx); // gradO auto z = reinterpret_cast<T*>(vz); // gradI __shared__ int rank, areSameOffsets, *sharedMem; // xRank = zRank __shared__ Nd4jLong len, totalThreads; // xLen = zLen if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<int*>(shmem); areSameOffsets = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo); rank = shape::rank(xShapeInfo); len = shape::length(zShapeInfo); totalThreads = gridDim.x * blockDim.x; } __syncthreads(); auto coords = sharedMem + threadIdx.x * rank; const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < len; i += totalThreads) { shape::index2coords(i, zShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); if((coords[rank - 2] + diag > coords[rank - 1])) // row + diag > col z[zOffset] = 0; else z[zOffset] = x[areSameOffsets ? 
zOffset : shape::getOffset(xShapeInfo, coords)]; } } /////////////////////////////////////////////////////////////////// template<typename T> static void triuBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int diag) { triuBPCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, diag); } /////////////////////////////////////////////////////////////////// void triuBP(sd::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int diagonal) { const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (gradO.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = threadsPerBlock * sizeof(int) * gradO.rankOf() + 128; PointersManager manager(context, "triuBP"); NDArray::prepareSpecialUse({&gradI}, {&gradO}); BUILD_SINGLE_SELECTOR(gradI.dataType(), triuBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), gradO.specialBuffer(), gradO.specialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), diagonal), LIBND4J_TYPES); NDArray::registerSpecialUse({&gradI}, {&gradO}); manager.synchronize(); } /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void tileBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, Nd4jLong* globMem) { // x and z have same shapes const auto x = reinterpret_cast<const T*>(vx); // gradO auto z = reinterpret_cast<T*>(vz); // gradI __shared__ int xRank, zRank, *sharedMem; // xRank >= zRank __shared__ Nd4jLong numOfXOffsets, zLen, totalThreads; // xLen >= zLen if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<int*>(shmem); xRank = shape::rank(zShapeInfo); zLen = shape::length(zShapeInfo); numOfXOffsets = shape::length(xShapeInfo) / zLen; totalThreads = gridDim.x * blockDim.x; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; auto memBuff = sharedMem + threadIdx.x * 2 * xRank; auto xOffsets = globMem + tid * numOfXOffsets; for (Nd4jLong i = tid; i < zLen; i += totalThreads) { const auto zOffset = shape::getIndexOffset(i, zShapeInfo); shape::outerArrayOffsets(xOffsets, i, xShapeInfo, zShapeInfo, memBuff); z[zOffset] = x[xOffsets[0]]; // first offset for (Nd4jLong j = 1; j < numOfXOffsets; ++j) // rest offsets z[zOffset] += x[xOffsets[j]]; } } /////////////////////////////////////////////////////////////////// template<typename T> static void tileBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, Nd4jLong* globMem) { tileBPCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, globMem); } ////////////////////////////////////////////////////////////////////////// void tileBP(sd::LaunchContext * context, const NDArray& gradO /*input*/, NDArray& gradI /*output*/, const std::vector<Nd4jLong> reps) { NDArray memBuff('c', gradO.getShapeAsVector(), sd::DataType::INT64, context); // empty auxiliary array for storing device memory which will be used in kernel calculations const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (gradI.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = threadsPerBlock * 
sizeof(int) * 2 * gradO.rankOf() + 128; PointersManager manager(context, "tileBP"); NDArray::prepareSpecialUse({&gradI}, {&gradO, &memBuff}); BUILD_SINGLE_SELECTOR(gradI.dataType(), tileBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), gradO.specialBuffer(), gradO.specialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), reinterpret_cast<Nd4jLong*>(memBuff.specialBuffer())), FLOAT_TYPES); NDArray::registerSpecialUse({&gradI}, {&gradO, &memBuff}); manager.synchronize(); } template <typename T> static __global__ void swapShuffleKernel(T* input, Nd4jLong const* shape, Nd4jLong firstDim, sd::graph::RandomGenerator* rng) { auto tid = blockIdx.x * blockDim.x; auto step = blockDim.x * gridDim.x; for (int i = firstDim - 1 - tid - threadIdx.x; i > 0; i -= step) { int r = rng->relativeInt(i) % i; if (i != r) { const auto iOffset = shape::getIndexOffset(i, shape); const auto rOffset = shape::getIndexOffset(r, shape); T e0 = input[iOffset]; T e1 = input[rOffset]; //math::nd4j_swap<T>(input(i), input(r)); input[iOffset] = e1; input[rOffset] = e0; } } } template <typename T> static __global__ void fillShuffleKernel(T* input, Nd4jLong const* inputShape, T* output, Nd4jLong const* outputShape, Nd4jLong firstDim, int* indices, sd::graph::RandomGenerator* rng) { // PRAGMA_OMP_PARALLEL_FOR_IF((firstDim-1) > Environment::getInstance()->tadThreshold()) auto tid = blockIdx.x * blockDim.x; auto step = blockDim.x * gridDim.x; for(int i = firstDim - 1 - tid - threadIdx.x; i > 0; i -= step) { int r = rng->relativeInt(i) % i; output[shape::getIndexOffset(i, outputShape)] = input[shape::getIndexOffset(indices[r], inputShape)]; if(i != r) { output[shape::getIndexOffset(r, outputShape)] = input[shape::getIndexOffset(indices[i], inputShape)]; // output.p(r, input.e<T>(indices[i])); // math::nd4j_swap<int>(indices[i], indices[r]); atomicExch(&indices[i], indices[r]); } } } ////////////////////////////////////////////////////////////////////////// template <typename T> void randomShuffle_(sd::LaunchContext * context, NDArray& input, NDArray& output, sd::graph::RandomGenerator& rng, const bool isInplace) { // check edge cases first int temp; const int firstDim = input.sizeAt(0); auto stream = context->getCudaStream(); NDArray::prepareSpecialUse({&output}, {&input}); if(input.lengthOf() == 1 || firstDim == 1) { if(!isInplace) output.assign(input); } else if (input.isVector() || shape::isLikeVector(input.shapeInfo(), temp)) { // apply Fisher-Yates shuffle sd::graph::RandomGenerator* dRandom = nullptr; cudaMalloc(&dRandom, sizeof(sd::graph::RandomGenerator)); cudaMemcpy(dRandom, &rng, sizeof(sd::graph::RandomGenerator), cudaMemcpyHostToDevice); T* inputBuf = reinterpret_cast<T*>(input.specialBuffer()); if(isInplace) { swapShuffleKernel<T><<<128, 256, 1024, *stream>>>(inputBuf, input.specialShapeInfo(), firstDim, dRandom); } else { std::vector<int> indices(firstDim); std::iota(indices.begin(), indices.end(), 0); cudaMemcpy(output.specialBuffer(), input.specialBuffer(), sizeof(T), cudaMemcpyDeviceToDevice); //output.p<T>(Nd4jLong(0), input.e<T>(0)); PointersManager pointersManager(context, "helper::randomShuffle_"); int* indicesDev = reinterpret_cast<int*>(pointersManager.replicatePointer(indices.data(), indices.size() * sizeof(int))); T* outputBuf = reinterpret_cast<T*>(output.specialBuffer()); fillShuffleKernel<T><<<128, 256, 1024, *stream>>>(inputBuf, input.specialShapeInfo(), outputBuf, output.specialShapeInfo(), firstDim, indicesDev, dRandom); pointersManager.synchronize(); } 
// rng.rewindH(firstDim - 1); cudaFree(dRandom); } else { // evaluate sub-arrays list of input array through all dimensions excluding first one std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input.rankOf(), {0}); auto subArrsListIn = input.allTensorsAlongDimension(dimensions); // apply Fisher-Yates shuffle if(isInplace) { for(int i = firstDim - 1; i > 0; --i) { int r = rng.relativeInt(i) % i; if(i != r) subArrsListIn.at(i)->swapUnsafe(*subArrsListIn.at(r)); } } else { // evaluate sub-arrays list of output array through all dimensions excluding first one auto subArrsListOut = output.allTensorsAlongDimension(dimensions); std::vector<int> indices(firstDim); std::iota(indices.begin(), indices.end(), 0); bool isZeroShuffled = false; for(int i = firstDim - 1; i > 0; --i) { int r = rng.relativeInt(i) % i; subArrsListOut.at(i)->assign(subArrsListIn.at(indices[r])); if(r == 0) isZeroShuffled = true; if(i != r) { subArrsListOut.at(r)->assign(subArrsListIn.at(indices[i])); math::nd4j_swap<int>(indices[i], indices[r]); } } if(!isZeroShuffled) subArrsListOut.at(0)->assign(subArrsListIn.at(0)); } rng.rewindH(firstDim-1); } NDArray::registerSpecialUse({&output}, {&input}); } void randomShuffle(sd::LaunchContext * context, NDArray& input, NDArray& output, sd::graph::RandomGenerator& rng, const bool isInplace) { BUILD_SINGLE_SELECTOR(input.dataType(), randomShuffle_, (context, input, output, rng, isInplace), LIBND4J_TYPES); } BUILD_SINGLE_TEMPLATE(template void randomShuffle_, (sd::LaunchContext * context, NDArray& input, NDArray& output, sd::graph::RandomGenerator& rng, const bool isInplace), LIBND4J_TYPES); ////////////////////////////////////////////////////////////////////////// void eye(sd::LaunchContext * context, NDArray& output) { output.setIdentity(); } } } }
4fb4c95899641b04332ec9bb68e59087b8b4d763.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/column/column_factories.hpp> #include <cudf/column/column_view.hpp> #include <cudf/copying.hpp> #include <cudf/detail/iterator.cuh> #include <cudf/null_mask.hpp> #include <cudf/transform.hpp> #include <cudf/types.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/column_utilities.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/cudf_gtest.hpp> #include <cudf_test/type_list_utilities.hpp> #include <cudf_test/type_lists.hpp> #include <random> #include <thrust/sequence.h> template <typename T> struct TypedColumnTest : public cudf::test::BaseFixture { cudf::data_type type() { return cudf::data_type{cudf::type_to_id<T>()}; } TypedColumnTest() : data{_num_elements * cudf::size_of(type()), rmm::cuda_stream_default}, mask{cudf::bitmask_allocation_size_bytes(_num_elements), rmm::cuda_stream_default} { auto typed_data = static_cast<char*>(data.data()); auto typed_mask = static_cast<char*>(mask.data()); thrust::sequence(thrust::device, typed_data, typed_data + data.size()); thrust::sequence(thrust::device, typed_mask, typed_mask + mask.size()); } cudf::size_type num_elements() { return _num_elements; } std::random_device r; std::default_random_engine generator{r()}; std::uniform_int_distribution<cudf::size_type> distribution{200, 1000}; cudf::size_type _num_elements{distribution(generator)}; rmm::device_buffer data{}; rmm::device_buffer mask{}; rmm::device_buffer all_valid_mask{create_null_mask(num_elements(), cudf::mask_state::ALL_VALID)}; rmm::device_buffer all_null_mask{create_null_mask(num_elements(), cudf::mask_state::ALL_NULL)}; }; TYPED_TEST_SUITE(TypedColumnTest, cudf::test::Types<int32_t>); /** * @brief Verifies equality of the properties and data of a `column`'s views. 
* * @param col The `column` to verify */ void verify_column_views(cudf::column col) { cudf::column_view view = col; cudf::mutable_column_view mutable_view = col; EXPECT_EQ(col.type(), view.type()); EXPECT_EQ(col.type(), mutable_view.type()); EXPECT_EQ(col.size(), view.size()); EXPECT_EQ(col.size(), mutable_view.size()); EXPECT_EQ(col.null_count(), view.null_count()); EXPECT_EQ(col.null_count(), mutable_view.null_count()); EXPECT_EQ(col.nullable(), view.nullable()); EXPECT_EQ(col.nullable(), mutable_view.nullable()); EXPECT_EQ(col.num_children(), view.num_children()); EXPECT_EQ(col.num_children(), mutable_view.num_children()); EXPECT_EQ(view.head(), mutable_view.head()); EXPECT_EQ(view.data<char>(), mutable_view.data<char>()); EXPECT_EQ(view.offset(), mutable_view.offset()); } TYPED_TEST(TypedColumnTest, DefaultNullCountNoMask) { cudf::column col{this->type(), this->num_elements(), std::move(this->data)}; EXPECT_FALSE(col.nullable()); EXPECT_FALSE(col.has_nulls()); EXPECT_EQ(0, col.null_count()); } TYPED_TEST(TypedColumnTest, DefaultNullCountEmptyMask) { cudf::column col{this->type(), this->num_elements(), std::move(this->data), rmm::device_buffer{}}; EXPECT_FALSE(col.nullable()); EXPECT_FALSE(col.has_nulls()); EXPECT_EQ(0, col.null_count()); } TYPED_TEST(TypedColumnTest, DefaultNullCountAllValid) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)}; EXPECT_TRUE(col.nullable()); EXPECT_FALSE(col.has_nulls()); EXPECT_EQ(0, col.null_count()); } TYPED_TEST(TypedColumnTest, ExplicitNullCountAllValid) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask), 0}; EXPECT_TRUE(col.nullable()); EXPECT_FALSE(col.has_nulls()); EXPECT_EQ(0, col.null_count()); } TYPED_TEST(TypedColumnTest, DefaultNullCountAllNull) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_null_mask)}; EXPECT_TRUE(col.nullable()); EXPECT_TRUE(col.has_nulls()); EXPECT_EQ(this->num_elements(), col.null_count()); } TYPED_TEST(TypedColumnTest, ExplicitNullCountAllNull) { cudf::column col{this->type(), this->num_elements(), std::move(this->data), std::move(this->all_null_mask), this->num_elements()}; EXPECT_TRUE(col.nullable()); EXPECT_TRUE(col.has_nulls()); EXPECT_EQ(this->num_elements(), col.null_count()); } TYPED_TEST(TypedColumnTest, SetNullCountNoMask) { cudf::column col{this->type(), this->num_elements(), std::move(this->data)}; EXPECT_THROW(col.set_null_count(1), cudf::logic_error); } TYPED_TEST(TypedColumnTest, SetEmptyNullMaskNonZeroNullCount) { cudf::column col{this->type(), this->num_elements(), std::move(this->data)}; rmm::device_buffer empty_null_mask{}; EXPECT_THROW(col.set_null_mask(empty_null_mask, this->num_elements()), cudf::logic_error); } TYPED_TEST(TypedColumnTest, SetInvalidSizeNullMaskNonZeroNullCount) { cudf::column col{this->type(), this->num_elements(), std::move(this->data)}; auto invalid_size_null_mask = create_null_mask(::min(this->num_elements() - 50, 0), cudf::mask_state::ALL_VALID); EXPECT_THROW(col.set_null_mask(invalid_size_null_mask, this->num_elements()), cudf::logic_error); } TYPED_TEST(TypedColumnTest, SetNullCountEmptyMask) { cudf::column col{this->type(), this->num_elements(), std::move(this->data), rmm::device_buffer{}}; EXPECT_THROW(col.set_null_count(1), cudf::logic_error); } TYPED_TEST(TypedColumnTest, SetNullCountAllValid) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), 
std::move(this->all_valid_mask)}; EXPECT_NO_THROW(col.set_null_count(0)); EXPECT_EQ(0, col.null_count()); } TYPED_TEST(TypedColumnTest, SetNullCountAllNull) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_null_mask)}; EXPECT_NO_THROW(col.set_null_count(this->num_elements())); EXPECT_EQ(this->num_elements(), col.null_count()); } TYPED_TEST(TypedColumnTest, ResetNullCountAllNull) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_null_mask)}; EXPECT_EQ(this->num_elements(), col.null_count()); EXPECT_NO_THROW(col.set_null_count(cudf::UNKNOWN_NULL_COUNT)); EXPECT_EQ(this->num_elements(), col.null_count()); } TYPED_TEST(TypedColumnTest, ResetNullCountAllValid) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)}; EXPECT_EQ(0, col.null_count()); EXPECT_NO_THROW(col.set_null_count(cudf::UNKNOWN_NULL_COUNT)); EXPECT_EQ(0, col.null_count()); } TYPED_TEST(TypedColumnTest, CopyDataNoMask) { cudf::column col{this->type(), this->num_elements(), std::move(this->data)}; EXPECT_EQ(this->type(), col.type()); EXPECT_FALSE(col.nullable()); EXPECT_EQ(0, col.null_count()); EXPECT_EQ(this->num_elements(), col.size()); EXPECT_EQ(0, col.num_children()); verify_column_views(col); // Verify deep copy cudf::column_view v = col; EXPECT_NE(v.head(), this->data.data()); CUDF_TEST_EXPECT_EQUAL_BUFFERS(v.head(), this->data.data(), this->data.size()); } TYPED_TEST(TypedColumnTest, MoveDataNoMask) { void* original_data = this->data.data(); cudf::column col{this->type(), this->num_elements(), std::move(this->data)}; EXPECT_EQ(this->type(), col.type()); EXPECT_FALSE(col.nullable()); EXPECT_EQ(0, col.null_count()); EXPECT_EQ(this->num_elements(), col.size()); EXPECT_EQ(0, col.num_children()); verify_column_views(col); // Verify shallow copy cudf::column_view v = col; EXPECT_EQ(v.head(), original_data); } TYPED_TEST(TypedColumnTest, CopyDataAndMask) { cudf::column col{this->type(), this->num_elements(), rmm::device_buffer{this->data, rmm::cuda_stream_default}, rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default}}; EXPECT_EQ(this->type(), col.type()); EXPECT_TRUE(col.nullable()); EXPECT_EQ(0, col.null_count()); EXPECT_EQ(this->num_elements(), col.size()); EXPECT_EQ(0, col.num_children()); verify_column_views(col); // Verify deep copy cudf::column_view v = col; EXPECT_NE(v.head(), this->data.data()); EXPECT_NE(v.null_mask(), this->all_valid_mask.data()); CUDF_TEST_EXPECT_EQUAL_BUFFERS(v.head(), this->data.data(), this->data.size()); CUDF_TEST_EXPECT_EQUAL_BUFFERS(v.null_mask(), this->all_valid_mask.data(), this->mask.size()); } TYPED_TEST(TypedColumnTest, MoveDataAndMask) { void* original_data = this->data.data(); void* original_mask = this->all_valid_mask.data(); cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)}; EXPECT_EQ(this->type(), col.type()); EXPECT_TRUE(col.nullable()); EXPECT_EQ(0, col.null_count()); EXPECT_EQ(this->num_elements(), col.size()); EXPECT_EQ(0, col.num_children()); verify_column_views(col); // Verify shallow copy cudf::column_view v = col; EXPECT_EQ(v.head(), original_data); EXPECT_EQ(v.null_mask(), original_mask); } TYPED_TEST(TypedColumnTest, CopyConstructorNoMask) { cudf::column original{this->type(), this->num_elements(), std::move(this->data)}; cudf::column copy{original}; verify_column_views(copy); CUDF_TEST_EXPECT_COLUMNS_EQUAL(original, copy); // Verify deep copy 
cudf::column_view original_view = original; cudf::column_view copy_view = copy; EXPECT_NE(original_view.head(), copy_view.head()); } TYPED_TEST(TypedColumnTest, CopyConstructorWithMask) { cudf::column original{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)}; cudf::column copy{original}; verify_column_views(copy); CUDF_TEST_EXPECT_COLUMNS_EQUAL(original, copy); // Verify deep copy cudf::column_view original_view = original; cudf::column_view copy_view = copy; EXPECT_NE(original_view.head(), copy_view.head()); EXPECT_NE(original_view.null_mask(), copy_view.null_mask()); } TYPED_TEST(TypedColumnTest, MoveConstructorNoMask) { cudf::column original{this->type(), this->num_elements(), std::move(this->data)}; auto original_data = original.view().head(); cudf::column moved_to{std::move(original)}; EXPECT_EQ(0, original.size()); EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, original.type()); verify_column_views(moved_to); // Verify move cudf::column_view moved_to_view = moved_to; EXPECT_EQ(original_data, moved_to_view.head()); } TYPED_TEST(TypedColumnTest, MoveConstructorWithMask) { cudf::column original{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)}; auto original_data = original.view().head(); auto original_mask = original.view().null_mask(); cudf::column moved_to{std::move(original)}; verify_column_views(moved_to); EXPECT_EQ(0, original.size()); EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, original.type()); // Verify move cudf::column_view moved_to_view = moved_to; EXPECT_EQ(original_data, moved_to_view.head()); EXPECT_EQ(original_mask, moved_to_view.null_mask()); } TYPED_TEST(TypedColumnTest, ConstructWithChildren) { std::vector<std::unique_ptr<cudf::column>> children; ; children.emplace_back(std::make_unique<cudf::column>( cudf::data_type{cudf::type_id::INT8}, 42, rmm::device_buffer{this->data, rmm::cuda_stream_default}, rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default})); children.emplace_back(std::make_unique<cudf::column>( cudf::data_type{cudf::type_id::FLOAT64}, 314, rmm::device_buffer{this->data, rmm::cuda_stream_default}, rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default})); cudf::column col{this->type(), this->num_elements(), rmm::device_buffer{this->data, rmm::cuda_stream_default}, rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default}, cudf::UNKNOWN_NULL_COUNT, std::move(children)}; verify_column_views(col); EXPECT_EQ(2, col.num_children()); EXPECT_EQ(cudf::data_type{cudf::type_id::INT8}, col.child(0).type()); EXPECT_EQ(42, col.child(0).size()); EXPECT_EQ(cudf::data_type{cudf::type_id::FLOAT64}, col.child(1).type()); EXPECT_EQ(314, col.child(1).size()); } TYPED_TEST(TypedColumnTest, ReleaseNoChildren) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)}; auto original_data = col.view().head(); auto original_mask = col.view().null_mask(); cudf::column::contents contents = col.release(); EXPECT_EQ(original_data, contents.data->data()); EXPECT_EQ(original_mask, contents.null_mask->data()); EXPECT_EQ(0u, contents.children.size()); EXPECT_EQ(0, col.size()); EXPECT_EQ(0, col.null_count()); EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, col.type()); EXPECT_EQ(0, col.num_children()); } TYPED_TEST(TypedColumnTest, ReleaseWithChildren) { std::vector<std::unique_ptr<cudf::column>> children; children.emplace_back(std::make_unique<cudf::column>( this->type(), this->num_elements(), 
rmm::device_buffer{this->data, rmm::cuda_stream_default}, rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default})); children.emplace_back(std::make_unique<cudf::column>( this->type(), this->num_elements(), rmm::device_buffer{this->data, rmm::cuda_stream_default}, rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default})); cudf::column col{this->type(), this->num_elements(), rmm::device_buffer{this->data, rmm::cuda_stream_default}, rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default}, cudf::UNKNOWN_NULL_COUNT, std::move(children)}; auto original_data = col.view().head(); auto original_mask = col.view().null_mask(); cudf::column::contents contents = col.release(); EXPECT_EQ(original_data, contents.data->data()); EXPECT_EQ(original_mask, contents.null_mask->data()); EXPECT_EQ(2u, contents.children.size()); EXPECT_EQ(0, col.size()); EXPECT_EQ(0, col.null_count()); EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, col.type()); EXPECT_EQ(0, col.num_children()); } TYPED_TEST(TypedColumnTest, ColumnViewConstructorWithMask) { cudf::column original{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)}; cudf::column_view original_view = original; cudf::column copy{original_view}; verify_column_views(copy); CUDF_TEST_EXPECT_COLUMNS_EQUAL(original, copy); // Verify deep copy cudf::column_view copy_view = copy; EXPECT_NE(original_view.head(), copy_view.head()); EXPECT_NE(original_view.null_mask(), copy_view.null_mask()); } template <typename T> struct ListsColumnTest : public cudf::test::BaseFixture { }; using NumericTypesNotBool = cudf::test::Concat<cudf::test::IntegralTypesNotBool, cudf::test::FloatingPointTypes>; TYPED_TEST_SUITE(ListsColumnTest, NumericTypesNotBool); TYPED_TEST(ListsColumnTest, ListsColumnViewConstructor) { cudf::test::lists_column_wrapper<TypeParam> list{{1, 2}, {3, 4}, {5, 6, 7}, {8, 9}}; auto result = std::make_unique<cudf::column>(list); cudf::test::expect_columns_equal(list, result->view()); } TYPED_TEST(ListsColumnTest, ListsSlicedColumnViewConstructor) { cudf::test::lists_column_wrapper<TypeParam> list{{1, 2}, {3, 4}, {5, 6, 7}, {8, 9}}; cudf::test::lists_column_wrapper<TypeParam> expect{{3, 4}, {5, 6, 7}}; auto sliced = cudf::slice(list, {1, 3}).front(); auto result = std::make_unique<cudf::column>(sliced); cudf::test::expect_columns_equal(expect, result->view()); } TYPED_TEST(ListsColumnTest, ListsSlicedIncludesEmpty) { cudf::test::lists_column_wrapper<TypeParam> list{{1, 2}, {}, {3, 4}, {8, 9}}; cudf::test::lists_column_wrapper<TypeParam> expect{{}, {3, 4}}; auto sliced = cudf::slice(list, {1, 3}).front(); auto result = std::make_unique<cudf::column>(sliced); cudf::test::expect_columns_equal(expect, result->view()); } TYPED_TEST(ListsColumnTest, ListsSlicedNonNestedEmpty) { using LCW = cudf::test::lists_column_wrapper<TypeParam>; // Column of List<int> LCW list{{1, 2}, {}, {3, 4}, {8, 9}}; // Column of 1 row, an empty List<int> LCW expect{LCW{}}; auto sliced = cudf::slice(list, {1, 2}).front(); auto result = std::make_unique<cudf::column>(sliced); cudf::test::expect_columns_equal(expect, result->view()); } TYPED_TEST(ListsColumnTest, ListsSlicedNestedEmpty) { using LCW = cudf::test::lists_column_wrapper<TypeParam>; using FWCW_SZ = cudf::test::fixed_width_column_wrapper<cudf::size_type>; // Column of List<List<int>>, with incomplete hierarchy LCW list{{LCW{1}, LCW{2}}, {}, // < ----------- empty List<List<int>>, slice this {LCW{3}, LCW{4, 5}}}; // Make 1-row column of type List<List<int>>, the row 
data contains 0 element. // Well-formed memory layout: // type: List<List<int>> // Length: 1 // Mask: 1 // Offsets: 0, 0 // List<int> // Length: 0 // Offset: // INT // Length: 0 auto leaf = std::make_unique<cudf::column>(cudf::column(LCW{})); auto offset = std::make_unique<cudf::column>(cudf::column(FWCW_SZ{0, 0})); auto null_mask = cudf::create_null_mask(0, cudf::mask_state::UNALLOCATED); auto expect = cudf::make_lists_column(1, std::move(offset), std::move(leaf), 0, std::move(null_mask)); auto sliced = cudf::slice(list, {1, 2}).front(); auto result = std::make_unique<cudf::column>(sliced); cudf::test::expect_columns_equal(*expect, result->view()); } TYPED_TEST(ListsColumnTest, ListsSlicedZeroSliceLengthNested) { using LCW = cudf::test::lists_column_wrapper<TypeParam>; using FWCW_SZ = cudf::test::fixed_width_column_wrapper<cudf::size_type>; // Column of List<List<int>>, with incomplete hierarchy LCW list{{LCW{1}, LCW{2}}, {}, {LCW{3}, LCW{4, 5}}}; auto expect = cudf::empty_like(list); auto sliced = cudf::slice(list, {0, 0}).front(); auto result = std::make_unique<cudf::column>(sliced); cudf::test::expect_columns_equal(*expect, result->view()); } TYPED_TEST(ListsColumnTest, ListsSlicedZeroSliceLengthNonNested) { using LCW = cudf::test::lists_column_wrapper<TypeParam>; using FWCW_SZ = cudf::test::fixed_width_column_wrapper<cudf::size_type>; LCW list{{1, 2}, {}, {3, 4}, {8, 9}}; auto expect = cudf::empty_like(list); auto sliced = cudf::slice(list, {0, 0}).front(); auto result = std::make_unique<cudf::column>(sliced); cudf::test::expect_columns_equal(*expect, result->view()); } TYPED_TEST(ListsColumnTest, ListsSlicedColumnViewConstructorWithNulls) { auto valids = cudf::detail::make_counting_transform_iterator( 0, [](auto i) { return i % 2 == 0 ? true : false; }); auto expect_valids = cudf::detail::make_counting_transform_iterator( 0, [](auto i) { return i % 2 == 0 ? false : true; }); using LCW = cudf::test::lists_column_wrapper<TypeParam>; cudf::test::lists_column_wrapper<TypeParam> list{ {{{{1, 2}, {3, 4}}, valids}, LCW{}, {{{5, 6, 7}, LCW{}, {8, 9}}, valids}, LCW{}, LCW{}}, valids}; cudf::test::lists_column_wrapper<TypeParam> expect{ {LCW{}, {{{5, 6, 7}, LCW{}, {8, 9}}, valids}, LCW{}, LCW{}}, expect_valids}; auto sliced = cudf::slice(list, {1, 5}).front(); auto result = std::make_unique<cudf::column>(sliced); cudf::test::expect_columns_equal(expect, result->view()); // TODO: null mask equality is being checked separately because // expect_columns_equal doesn't do the check for lists columns. // This is fixed in https://github.com/rapidsai/cudf/pull/5904, // so we should remove this check after that's merged: cudf::test::expect_columns_equal( cudf::mask_to_bools(result->view().null_mask(), 0, 4)->view(), cudf::mask_to_bools(static_cast<cudf::column_view>(expect).null_mask(), 0, 4)->view()); } CUDF_TEST_PROGRAM_MAIN()
4fb4c95899641b04332ec9bb68e59087b8b4d763.cu
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/column/column_factories.hpp> #include <cudf/column/column_view.hpp> #include <cudf/copying.hpp> #include <cudf/detail/iterator.cuh> #include <cudf/null_mask.hpp> #include <cudf/transform.hpp> #include <cudf/types.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/column_utilities.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/cudf_gtest.hpp> #include <cudf_test/type_list_utilities.hpp> #include <cudf_test/type_lists.hpp> #include <random> #include <thrust/sequence.h> template <typename T> struct TypedColumnTest : public cudf::test::BaseFixture { cudf::data_type type() { return cudf::data_type{cudf::type_to_id<T>()}; } TypedColumnTest() : data{_num_elements * cudf::size_of(type()), rmm::cuda_stream_default}, mask{cudf::bitmask_allocation_size_bytes(_num_elements), rmm::cuda_stream_default} { auto typed_data = static_cast<char*>(data.data()); auto typed_mask = static_cast<char*>(mask.data()); thrust::sequence(thrust::device, typed_data, typed_data + data.size()); thrust::sequence(thrust::device, typed_mask, typed_mask + mask.size()); } cudf::size_type num_elements() { return _num_elements; } std::random_device r; std::default_random_engine generator{r()}; std::uniform_int_distribution<cudf::size_type> distribution{200, 1000}; cudf::size_type _num_elements{distribution(generator)}; rmm::device_buffer data{}; rmm::device_buffer mask{}; rmm::device_buffer all_valid_mask{create_null_mask(num_elements(), cudf::mask_state::ALL_VALID)}; rmm::device_buffer all_null_mask{create_null_mask(num_elements(), cudf::mask_state::ALL_NULL)}; }; TYPED_TEST_SUITE(TypedColumnTest, cudf::test::Types<int32_t>); /** * @brief Verifies equality of the properties and data of a `column`'s views. 
* * @param col The `column` to verify */ void verify_column_views(cudf::column col) { cudf::column_view view = col; cudf::mutable_column_view mutable_view = col; EXPECT_EQ(col.type(), view.type()); EXPECT_EQ(col.type(), mutable_view.type()); EXPECT_EQ(col.size(), view.size()); EXPECT_EQ(col.size(), mutable_view.size()); EXPECT_EQ(col.null_count(), view.null_count()); EXPECT_EQ(col.null_count(), mutable_view.null_count()); EXPECT_EQ(col.nullable(), view.nullable()); EXPECT_EQ(col.nullable(), mutable_view.nullable()); EXPECT_EQ(col.num_children(), view.num_children()); EXPECT_EQ(col.num_children(), mutable_view.num_children()); EXPECT_EQ(view.head(), mutable_view.head()); EXPECT_EQ(view.data<char>(), mutable_view.data<char>()); EXPECT_EQ(view.offset(), mutable_view.offset()); } TYPED_TEST(TypedColumnTest, DefaultNullCountNoMask) { cudf::column col{this->type(), this->num_elements(), std::move(this->data)}; EXPECT_FALSE(col.nullable()); EXPECT_FALSE(col.has_nulls()); EXPECT_EQ(0, col.null_count()); } TYPED_TEST(TypedColumnTest, DefaultNullCountEmptyMask) { cudf::column col{this->type(), this->num_elements(), std::move(this->data), rmm::device_buffer{}}; EXPECT_FALSE(col.nullable()); EXPECT_FALSE(col.has_nulls()); EXPECT_EQ(0, col.null_count()); } TYPED_TEST(TypedColumnTest, DefaultNullCountAllValid) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)}; EXPECT_TRUE(col.nullable()); EXPECT_FALSE(col.has_nulls()); EXPECT_EQ(0, col.null_count()); } TYPED_TEST(TypedColumnTest, ExplicitNullCountAllValid) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask), 0}; EXPECT_TRUE(col.nullable()); EXPECT_FALSE(col.has_nulls()); EXPECT_EQ(0, col.null_count()); } TYPED_TEST(TypedColumnTest, DefaultNullCountAllNull) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_null_mask)}; EXPECT_TRUE(col.nullable()); EXPECT_TRUE(col.has_nulls()); EXPECT_EQ(this->num_elements(), col.null_count()); } TYPED_TEST(TypedColumnTest, ExplicitNullCountAllNull) { cudf::column col{this->type(), this->num_elements(), std::move(this->data), std::move(this->all_null_mask), this->num_elements()}; EXPECT_TRUE(col.nullable()); EXPECT_TRUE(col.has_nulls()); EXPECT_EQ(this->num_elements(), col.null_count()); } TYPED_TEST(TypedColumnTest, SetNullCountNoMask) { cudf::column col{this->type(), this->num_elements(), std::move(this->data)}; EXPECT_THROW(col.set_null_count(1), cudf::logic_error); } TYPED_TEST(TypedColumnTest, SetEmptyNullMaskNonZeroNullCount) { cudf::column col{this->type(), this->num_elements(), std::move(this->data)}; rmm::device_buffer empty_null_mask{}; EXPECT_THROW(col.set_null_mask(empty_null_mask, this->num_elements()), cudf::logic_error); } TYPED_TEST(TypedColumnTest, SetInvalidSizeNullMaskNonZeroNullCount) { cudf::column col{this->type(), this->num_elements(), std::move(this->data)}; auto invalid_size_null_mask = create_null_mask(std::min(this->num_elements() - 50, 0), cudf::mask_state::ALL_VALID); EXPECT_THROW(col.set_null_mask(invalid_size_null_mask, this->num_elements()), cudf::logic_error); } TYPED_TEST(TypedColumnTest, SetNullCountEmptyMask) { cudf::column col{this->type(), this->num_elements(), std::move(this->data), rmm::device_buffer{}}; EXPECT_THROW(col.set_null_count(1), cudf::logic_error); } TYPED_TEST(TypedColumnTest, SetNullCountAllValid) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), 
std::move(this->all_valid_mask)}; EXPECT_NO_THROW(col.set_null_count(0)); EXPECT_EQ(0, col.null_count()); } TYPED_TEST(TypedColumnTest, SetNullCountAllNull) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_null_mask)}; EXPECT_NO_THROW(col.set_null_count(this->num_elements())); EXPECT_EQ(this->num_elements(), col.null_count()); } TYPED_TEST(TypedColumnTest, ResetNullCountAllNull) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_null_mask)}; EXPECT_EQ(this->num_elements(), col.null_count()); EXPECT_NO_THROW(col.set_null_count(cudf::UNKNOWN_NULL_COUNT)); EXPECT_EQ(this->num_elements(), col.null_count()); } TYPED_TEST(TypedColumnTest, ResetNullCountAllValid) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)}; EXPECT_EQ(0, col.null_count()); EXPECT_NO_THROW(col.set_null_count(cudf::UNKNOWN_NULL_COUNT)); EXPECT_EQ(0, col.null_count()); } TYPED_TEST(TypedColumnTest, CopyDataNoMask) { cudf::column col{this->type(), this->num_elements(), std::move(this->data)}; EXPECT_EQ(this->type(), col.type()); EXPECT_FALSE(col.nullable()); EXPECT_EQ(0, col.null_count()); EXPECT_EQ(this->num_elements(), col.size()); EXPECT_EQ(0, col.num_children()); verify_column_views(col); // Verify deep copy cudf::column_view v = col; EXPECT_NE(v.head(), this->data.data()); CUDF_TEST_EXPECT_EQUAL_BUFFERS(v.head(), this->data.data(), this->data.size()); } TYPED_TEST(TypedColumnTest, MoveDataNoMask) { void* original_data = this->data.data(); cudf::column col{this->type(), this->num_elements(), std::move(this->data)}; EXPECT_EQ(this->type(), col.type()); EXPECT_FALSE(col.nullable()); EXPECT_EQ(0, col.null_count()); EXPECT_EQ(this->num_elements(), col.size()); EXPECT_EQ(0, col.num_children()); verify_column_views(col); // Verify shallow copy cudf::column_view v = col; EXPECT_EQ(v.head(), original_data); } TYPED_TEST(TypedColumnTest, CopyDataAndMask) { cudf::column col{this->type(), this->num_elements(), rmm::device_buffer{this->data, rmm::cuda_stream_default}, rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default}}; EXPECT_EQ(this->type(), col.type()); EXPECT_TRUE(col.nullable()); EXPECT_EQ(0, col.null_count()); EXPECT_EQ(this->num_elements(), col.size()); EXPECT_EQ(0, col.num_children()); verify_column_views(col); // Verify deep copy cudf::column_view v = col; EXPECT_NE(v.head(), this->data.data()); EXPECT_NE(v.null_mask(), this->all_valid_mask.data()); CUDF_TEST_EXPECT_EQUAL_BUFFERS(v.head(), this->data.data(), this->data.size()); CUDF_TEST_EXPECT_EQUAL_BUFFERS(v.null_mask(), this->all_valid_mask.data(), this->mask.size()); } TYPED_TEST(TypedColumnTest, MoveDataAndMask) { void* original_data = this->data.data(); void* original_mask = this->all_valid_mask.data(); cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)}; EXPECT_EQ(this->type(), col.type()); EXPECT_TRUE(col.nullable()); EXPECT_EQ(0, col.null_count()); EXPECT_EQ(this->num_elements(), col.size()); EXPECT_EQ(0, col.num_children()); verify_column_views(col); // Verify shallow copy cudf::column_view v = col; EXPECT_EQ(v.head(), original_data); EXPECT_EQ(v.null_mask(), original_mask); } TYPED_TEST(TypedColumnTest, CopyConstructorNoMask) { cudf::column original{this->type(), this->num_elements(), std::move(this->data)}; cudf::column copy{original}; verify_column_views(copy); CUDF_TEST_EXPECT_COLUMNS_EQUAL(original, copy); // Verify deep copy 
cudf::column_view original_view = original; cudf::column_view copy_view = copy; EXPECT_NE(original_view.head(), copy_view.head()); } TYPED_TEST(TypedColumnTest, CopyConstructorWithMask) { cudf::column original{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)}; cudf::column copy{original}; verify_column_views(copy); CUDF_TEST_EXPECT_COLUMNS_EQUAL(original, copy); // Verify deep copy cudf::column_view original_view = original; cudf::column_view copy_view = copy; EXPECT_NE(original_view.head(), copy_view.head()); EXPECT_NE(original_view.null_mask(), copy_view.null_mask()); } TYPED_TEST(TypedColumnTest, MoveConstructorNoMask) { cudf::column original{this->type(), this->num_elements(), std::move(this->data)}; auto original_data = original.view().head(); cudf::column moved_to{std::move(original)}; EXPECT_EQ(0, original.size()); EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, original.type()); verify_column_views(moved_to); // Verify move cudf::column_view moved_to_view = moved_to; EXPECT_EQ(original_data, moved_to_view.head()); } TYPED_TEST(TypedColumnTest, MoveConstructorWithMask) { cudf::column original{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)}; auto original_data = original.view().head(); auto original_mask = original.view().null_mask(); cudf::column moved_to{std::move(original)}; verify_column_views(moved_to); EXPECT_EQ(0, original.size()); EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, original.type()); // Verify move cudf::column_view moved_to_view = moved_to; EXPECT_EQ(original_data, moved_to_view.head()); EXPECT_EQ(original_mask, moved_to_view.null_mask()); } TYPED_TEST(TypedColumnTest, ConstructWithChildren) { std::vector<std::unique_ptr<cudf::column>> children; ; children.emplace_back(std::make_unique<cudf::column>( cudf::data_type{cudf::type_id::INT8}, 42, rmm::device_buffer{this->data, rmm::cuda_stream_default}, rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default})); children.emplace_back(std::make_unique<cudf::column>( cudf::data_type{cudf::type_id::FLOAT64}, 314, rmm::device_buffer{this->data, rmm::cuda_stream_default}, rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default})); cudf::column col{this->type(), this->num_elements(), rmm::device_buffer{this->data, rmm::cuda_stream_default}, rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default}, cudf::UNKNOWN_NULL_COUNT, std::move(children)}; verify_column_views(col); EXPECT_EQ(2, col.num_children()); EXPECT_EQ(cudf::data_type{cudf::type_id::INT8}, col.child(0).type()); EXPECT_EQ(42, col.child(0).size()); EXPECT_EQ(cudf::data_type{cudf::type_id::FLOAT64}, col.child(1).type()); EXPECT_EQ(314, col.child(1).size()); } TYPED_TEST(TypedColumnTest, ReleaseNoChildren) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)}; auto original_data = col.view().head(); auto original_mask = col.view().null_mask(); cudf::column::contents contents = col.release(); EXPECT_EQ(original_data, contents.data->data()); EXPECT_EQ(original_mask, contents.null_mask->data()); EXPECT_EQ(0u, contents.children.size()); EXPECT_EQ(0, col.size()); EXPECT_EQ(0, col.null_count()); EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, col.type()); EXPECT_EQ(0, col.num_children()); } TYPED_TEST(TypedColumnTest, ReleaseWithChildren) { std::vector<std::unique_ptr<cudf::column>> children; children.emplace_back(std::make_unique<cudf::column>( this->type(), this->num_elements(), 
rmm::device_buffer{this->data, rmm::cuda_stream_default}, rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default})); children.emplace_back(std::make_unique<cudf::column>( this->type(), this->num_elements(), rmm::device_buffer{this->data, rmm::cuda_stream_default}, rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default})); cudf::column col{this->type(), this->num_elements(), rmm::device_buffer{this->data, rmm::cuda_stream_default}, rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default}, cudf::UNKNOWN_NULL_COUNT, std::move(children)}; auto original_data = col.view().head(); auto original_mask = col.view().null_mask(); cudf::column::contents contents = col.release(); EXPECT_EQ(original_data, contents.data->data()); EXPECT_EQ(original_mask, contents.null_mask->data()); EXPECT_EQ(2u, contents.children.size()); EXPECT_EQ(0, col.size()); EXPECT_EQ(0, col.null_count()); EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, col.type()); EXPECT_EQ(0, col.num_children()); } TYPED_TEST(TypedColumnTest, ColumnViewConstructorWithMask) { cudf::column original{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)}; cudf::column_view original_view = original; cudf::column copy{original_view}; verify_column_views(copy); CUDF_TEST_EXPECT_COLUMNS_EQUAL(original, copy); // Verify deep copy cudf::column_view copy_view = copy; EXPECT_NE(original_view.head(), copy_view.head()); EXPECT_NE(original_view.null_mask(), copy_view.null_mask()); } template <typename T> struct ListsColumnTest : public cudf::test::BaseFixture { }; using NumericTypesNotBool = cudf::test::Concat<cudf::test::IntegralTypesNotBool, cudf::test::FloatingPointTypes>; TYPED_TEST_SUITE(ListsColumnTest, NumericTypesNotBool); TYPED_TEST(ListsColumnTest, ListsColumnViewConstructor) { cudf::test::lists_column_wrapper<TypeParam> list{{1, 2}, {3, 4}, {5, 6, 7}, {8, 9}}; auto result = std::make_unique<cudf::column>(list); cudf::test::expect_columns_equal(list, result->view()); } TYPED_TEST(ListsColumnTest, ListsSlicedColumnViewConstructor) { cudf::test::lists_column_wrapper<TypeParam> list{{1, 2}, {3, 4}, {5, 6, 7}, {8, 9}}; cudf::test::lists_column_wrapper<TypeParam> expect{{3, 4}, {5, 6, 7}}; auto sliced = cudf::slice(list, {1, 3}).front(); auto result = std::make_unique<cudf::column>(sliced); cudf::test::expect_columns_equal(expect, result->view()); } TYPED_TEST(ListsColumnTest, ListsSlicedIncludesEmpty) { cudf::test::lists_column_wrapper<TypeParam> list{{1, 2}, {}, {3, 4}, {8, 9}}; cudf::test::lists_column_wrapper<TypeParam> expect{{}, {3, 4}}; auto sliced = cudf::slice(list, {1, 3}).front(); auto result = std::make_unique<cudf::column>(sliced); cudf::test::expect_columns_equal(expect, result->view()); } TYPED_TEST(ListsColumnTest, ListsSlicedNonNestedEmpty) { using LCW = cudf::test::lists_column_wrapper<TypeParam>; // Column of List<int> LCW list{{1, 2}, {}, {3, 4}, {8, 9}}; // Column of 1 row, an empty List<int> LCW expect{LCW{}}; auto sliced = cudf::slice(list, {1, 2}).front(); auto result = std::make_unique<cudf::column>(sliced); cudf::test::expect_columns_equal(expect, result->view()); } TYPED_TEST(ListsColumnTest, ListsSlicedNestedEmpty) { using LCW = cudf::test::lists_column_wrapper<TypeParam>; using FWCW_SZ = cudf::test::fixed_width_column_wrapper<cudf::size_type>; // Column of List<List<int>>, with incomplete hierarchy LCW list{{LCW{1}, LCW{2}}, {}, // < ----------- empty List<List<int>>, slice this {LCW{3}, LCW{4, 5}}}; // Make 1-row column of type List<List<int>>, the row 
data contains 0 element. // Well-formed memory layout: // type: List<List<int>> // Length: 1 // Mask: 1 // Offsets: 0, 0 // List<int> // Length: 0 // Offset: // INT // Length: 0 auto leaf = std::make_unique<cudf::column>(cudf::column(LCW{})); auto offset = std::make_unique<cudf::column>(cudf::column(FWCW_SZ{0, 0})); auto null_mask = cudf::create_null_mask(0, cudf::mask_state::UNALLOCATED); auto expect = cudf::make_lists_column(1, std::move(offset), std::move(leaf), 0, std::move(null_mask)); auto sliced = cudf::slice(list, {1, 2}).front(); auto result = std::make_unique<cudf::column>(sliced); cudf::test::expect_columns_equal(*expect, result->view()); } TYPED_TEST(ListsColumnTest, ListsSlicedZeroSliceLengthNested) { using LCW = cudf::test::lists_column_wrapper<TypeParam>; using FWCW_SZ = cudf::test::fixed_width_column_wrapper<cudf::size_type>; // Column of List<List<int>>, with incomplete hierarchy LCW list{{LCW{1}, LCW{2}}, {}, {LCW{3}, LCW{4, 5}}}; auto expect = cudf::empty_like(list); auto sliced = cudf::slice(list, {0, 0}).front(); auto result = std::make_unique<cudf::column>(sliced); cudf::test::expect_columns_equal(*expect, result->view()); } TYPED_TEST(ListsColumnTest, ListsSlicedZeroSliceLengthNonNested) { using LCW = cudf::test::lists_column_wrapper<TypeParam>; using FWCW_SZ = cudf::test::fixed_width_column_wrapper<cudf::size_type>; LCW list{{1, 2}, {}, {3, 4}, {8, 9}}; auto expect = cudf::empty_like(list); auto sliced = cudf::slice(list, {0, 0}).front(); auto result = std::make_unique<cudf::column>(sliced); cudf::test::expect_columns_equal(*expect, result->view()); } TYPED_TEST(ListsColumnTest, ListsSlicedColumnViewConstructorWithNulls) { auto valids = cudf::detail::make_counting_transform_iterator( 0, [](auto i) { return i % 2 == 0 ? true : false; }); auto expect_valids = cudf::detail::make_counting_transform_iterator( 0, [](auto i) { return i % 2 == 0 ? false : true; }); using LCW = cudf::test::lists_column_wrapper<TypeParam>; cudf::test::lists_column_wrapper<TypeParam> list{ {{{{1, 2}, {3, 4}}, valids}, LCW{}, {{{5, 6, 7}, LCW{}, {8, 9}}, valids}, LCW{}, LCW{}}, valids}; cudf::test::lists_column_wrapper<TypeParam> expect{ {LCW{}, {{{5, 6, 7}, LCW{}, {8, 9}}, valids}, LCW{}, LCW{}}, expect_valids}; auto sliced = cudf::slice(list, {1, 5}).front(); auto result = std::make_unique<cudf::column>(sliced); cudf::test::expect_columns_equal(expect, result->view()); // TODO: null mask equality is being checked separately because // expect_columns_equal doesn't do the check for lists columns. // This is fixed in https://github.com/rapidsai/cudf/pull/5904, // so we should remove this check after that's merged: cudf::test::expect_columns_equal( cudf::mask_to_bools(result->view().null_mask(), 0, 4)->view(), cudf::mask_to_bools(static_cast<cudf::column_view>(expect).null_mask(), 0, 4)->view()); } CUDF_TEST_PROGRAM_MAIN()
20e412ceddf4871188ff4d6389b43b3957fb1039.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * This sample implements a separable convolution filter * of a 2D image with an arbitrary kernel. */ // CUDA runtime #include <hip/hip_runtime.h> // Utilities and system includes //#include <helper_functions.h> #include <hip/hip_runtime.h> #include "../include/common.h" //#include <ctime.h> #include <time.h> #define KERNEL_RADIUS 8 #define KERNEL_LENGTH (2 * KERNEL_RADIUS + 1) //__constant__ float c_Kernel[KERNEL_LENGTH]; /*void setConvolutionKernel(float *h_Kernel) { hipMemcpyToSymbol(c_Kernel, h_Kernel, KERNEL_LENGTH * sizeof(float)); }*/ //////////////////////////////////////////////////////////////////////////////// // Row convolution filter //////////////////////////////////////////////////////////////////////////////// #define ROWS_BLOCKDIM_X 16 #define ROWS_BLOCKDIM_Y 4 #define ROWS_RESULT_STEPS 8 #define ROWS_HALO_STEPS 1 __global__ void convolutionRowsKernel( float *d_Dst, float *d_Src, int imageW, int imageH, int pitch, float *c_Kernel ) { __shared__ float s_Data[ROWS_BLOCKDIM_Y][(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X]; __shared__ float s_Kernel[KERNEL_LENGTH * sizeof(float)]; for(int i = 0; i<KERNEL_LENGTH * sizeof(float);i++){ s_Kernel[i] =c_Kernel[i]; } //Offset to the left halo edge const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y; d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; //Load main data #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = d_Src[i * ROWS_BLOCKDIM_X]; } //Load left halo #pragma unroll for (int i = 0; i < ROWS_HALO_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0; } //Load right halo #pragma unroll for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? 
d_Src[i * ROWS_BLOCKDIM_X] : 0; } //Compute and store results __syncthreads(); #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { float sum = 0; #pragma unroll for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) { sum += s_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j]; } d_Dst[i * ROWS_BLOCKDIM_X] = sum; } } void convolutionRowsGPU( float *d_Dst, float *d_Src, int imageW, int imageH, float *c_Kernel ) { assert(ROWS_BLOCKDIM_X * ROWS_HALO_STEPS >= KERNEL_RADIUS); assert(imageW % (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X) == 0); assert(imageH % ROWS_BLOCKDIM_Y == 0); dim3 blocks(imageW / (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X), imageH / ROWS_BLOCKDIM_Y); dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y); hipLaunchKernelGGL(( convolutionRowsKernel), dim3(blocks), dim3(threads), 0, 0, d_Dst, d_Src, imageW, imageH, imageW, c_Kernel ); getLastCudaError("convolutionRowsKernel() execution failed\n"); } //////////////////////////////////////////////////////////////////////////////// // Column convolution filter //////////////////////////////////////////////////////////////////////////////// #define COLUMNS_BLOCKDIM_X 16 #define COLUMNS_BLOCKDIM_Y 8 #define COLUMNS_RESULT_STEPS 8 #define COLUMNS_HALO_STEPS 1 __global__ void convolutionColumnsKernel( float *d_Dst, float *d_Src, int imageW, int imageH, int pitch, float *c_Kernel ) { __shared__ float s_Data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1]; __shared__ float s_Kernel[KERNEL_LENGTH * sizeof(float)]; for(int i = 0; i<KERNEL_LENGTH * sizeof(float);i++){ s_Kernel[i] =c_Kernel[i]; } //Offset to the upper halo edge const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x; const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y; d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; //Main data #pragma unroll for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * pitch]; } //Upper halo #pragma unroll for (int i = 0; i < COLUMNS_HALO_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; } //Lower halo #pragma unroll for (int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y]= (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? 
d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; } //Compute and store results __syncthreads(); #pragma unroll for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) { float sum = 0; #pragma unroll for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) { sum += s_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j]; } d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch] = sum; } } void convolutionColumnsGPU( float *d_Dst, float *d_Src, int imageW, int imageH, float *c_Kernel ) { assert(COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS >= KERNEL_RADIUS); assert(imageW % COLUMNS_BLOCKDIM_X == 0); assert(imageH % (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y) == 0); dim3 blocks(imageW / COLUMNS_BLOCKDIM_X, imageH / (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y)); dim3 threads(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y); hipLaunchKernelGGL(( convolutionColumnsKernel), dim3(blocks), dim3(threads), 0, 0, d_Dst, d_Src, imageW, imageH, imageW, c_Kernel ); getLastCudaError("convolutionColumnsKernel() execution failed\n"); } void convolutionRowCPU( float *h_Dst, float *h_Src, float *h_Kernel, int imageW, int imageH, int kernelR ) { for (int y = 0; y < imageH; y++) for (int x = 0; x < imageW; x++) { float sum = 0; for (int k = -kernelR; k <= kernelR; k++) { int d = x + k; if (d >= 0 && d < imageW) sum += h_Src[y * imageW + d] * h_Kernel[kernelR - k]; } h_Dst[y * imageW + x] = sum; } } //////////////////////////////////////////////////////////////////////////////// // Reference column convolution filter //////////////////////////////////////////////////////////////////////////////// void convolutionColumnCPU( float *h_Dst, float *h_Src, float *h_Kernel, int imageW, int imageH, int kernelR ) { for (int y = 0; y < imageH; y++) for (int x = 0; x < imageW; x++) { float sum = 0; for (int k = -kernelR; k <= kernelR; k++) { int d = y + k; if (d >= 0 && d < imageH) sum += h_Src[d * imageW + x] * h_Kernel[kernelR - k]; } h_Dst[y * imageW + x] = sum; } } //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { // start logs printf("[%s] - Starting...\n", argv[0]); float *h_Kernel, *h_Input, *h_Buffer, *h_OutputCPU, *h_OutputGPU; float *d_Input, *d_Output, *d_Buffer, *c_Kernel; const int imageW = 3072; const int imageH = 3072; const int iterations = 16; struct timespec t1,t2; //Use command-line specified CUDA device, otherwise use device with highest Gflops/s //findCudaDevice(argc, (const char **)argv); printf("Image Width x Height = %i x %i\n\n", imageW, imageH); printf("Allocating and initializing host arrays...\n"); h_Kernel = (float *)malloc(KERNEL_LENGTH * sizeof(float)); h_Input = (float *)malloc(imageW * imageH * sizeof(float)); h_Buffer = (float *)malloc(imageW * imageH * sizeof(float)); h_OutputCPU = (float *)malloc(imageW * imageH * sizeof(float)); h_OutputGPU = (float *)malloc(imageW * imageH * sizeof(float)); srand(200); for (unsigned int i = 0; i < KERNEL_LENGTH; i++) { h_Kernel[i] = (float)(rand() % 16); } for (unsigned i = 0; i < imageW * imageH; i++) { h_Input[i] = (float)(rand() % 16); } printf("Allocating and initializing CUDA arrays...\n"); checkCudaErrors(hipMalloc((void **)&d_Input, imageW * imageH * sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_Output, imageW * imageH * sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_Buffer , imageW * imageH * sizeof(float))); hipMalloc((void **)&c_Kernel, 
KERNEL_LENGTH*sizeof(float)); hipMemcpy(c_Kernel, h_Kernel, KERNEL_LENGTH * sizeof(float),hipMemcpyHostToDevice); // setConvolutionKernel(h_Kernel); checkCudaErrors(hipMemcpy(d_Input, h_Input, imageW * imageH * sizeof(float), hipMemcpyHostToDevice)); printf("Running GPU convolution (%u identical iterations)...\n\n", iterations); for (int i = -1; i < iterations; i++) { //i == -1 -- warmup iteration if (i == 0) { checkCudaErrors(hipDeviceSynchronize()); clock_gettime(CLOCK_MONOTONIC,&t1); } convolutionRowsGPU( d_Buffer, d_Input, imageW, imageH, c_Kernel ); convolutionColumnsGPU( d_Output, d_Buffer, imageW, imageH, c_Kernel ); } checkCudaErrors(hipDeviceSynchronize()); clock_gettime(CLOCK_MONOTONIC,&t2); double gpuTime = ((t2.tv_sec-t1.tv_sec)+ (t2.tv_nsec-t1.tv_nsec)/1.e9)/ (double)iterations; printf("convolutionSeparable, Throughput = %.4f MPixels/sec, Time = %.5f s, Size = %u Pixels, NumDevsUsed = %i, Workgroup = %u\n", (1.0e-6 * (double)(imageW * imageH)/ gpuTime), gpuTime, (imageW * imageH), 1, 0); printf("\nReading back GPU results...\n\n"); checkCudaErrors(hipMemcpy(h_OutputGPU, d_Output, imageW * imageH * sizeof(float), hipMemcpyDeviceToHost)); printf("Checking the results...\n"); printf(" ...running convolutionRowCPU()\n"); convolutionRowCPU( h_Buffer, h_Input, h_Kernel, imageW, imageH, KERNEL_RADIUS ); printf(" ...running convolutionColumnCPU()\n"); convolutionColumnCPU( h_OutputCPU, h_Buffer, h_Kernel, imageW, imageH, KERNEL_RADIUS ); printf(" ...comparing the results\n"); double sum = 0, delta = 0; for (unsigned i = 0; i < imageW * imageH; i++) { delta += (h_OutputGPU[i] - h_OutputCPU[i]) * (h_OutputGPU[i] - h_OutputCPU[i]); sum += h_OutputCPU[i] * h_OutputCPU[i]; } double L2norm = sqrt(delta / sum); printf(" ...Relative L2 norm: %E\n\n", L2norm); printf("Shutting down...\n"); checkCudaErrors(hipFree(d_Buffer)); checkCudaErrors(hipFree(d_Output)); checkCudaErrors(hipFree(d_Input)); free(h_OutputGPU); free(h_OutputCPU); free(h_Buffer); free(h_Input); free(h_Kernel); hipDeviceReset(); if (L2norm > 1e-6) { printf("Test failed!\n"); exit(EXIT_FAILURE); } printf("Test passed\n"); exit(EXIT_SUCCESS); }
20e412ceddf4871188ff4d6389b43b3957fb1039.cu
/* * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * This sample implements a separable convolution filter * of a 2D image with an arbitrary kernel. */ // CUDA runtime #include <cuda_runtime.h> // Utilities and system includes //#include <helper_functions.h> #include <cuda.h> #include "../include/common.h" //#include <ctime.h> #include <time.h> #define KERNEL_RADIUS 8 #define KERNEL_LENGTH (2 * KERNEL_RADIUS + 1) //__constant__ float c_Kernel[KERNEL_LENGTH]; /*void setConvolutionKernel(float *h_Kernel) { cudaMemcpyToSymbol(c_Kernel, h_Kernel, KERNEL_LENGTH * sizeof(float)); }*/ //////////////////////////////////////////////////////////////////////////////// // Row convolution filter //////////////////////////////////////////////////////////////////////////////// #define ROWS_BLOCKDIM_X 16 #define ROWS_BLOCKDIM_Y 4 #define ROWS_RESULT_STEPS 8 #define ROWS_HALO_STEPS 1 __global__ void convolutionRowsKernel( float *d_Dst, float *d_Src, int imageW, int imageH, int pitch, float *c_Kernel ) { __shared__ float s_Data[ROWS_BLOCKDIM_Y][(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X]; __shared__ float s_Kernel[KERNEL_LENGTH * sizeof(float)]; for(int i = 0; i<KERNEL_LENGTH * sizeof(float);i++){ s_Kernel[i] =c_Kernel[i]; } //Offset to the left halo edge const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y; d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; //Load main data #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = d_Src[i * ROWS_BLOCKDIM_X]; } //Load left halo #pragma unroll for (int i = 0; i < ROWS_HALO_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0; } //Load right halo #pragma unroll for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? 
d_Src[i * ROWS_BLOCKDIM_X] : 0; } //Compute and store results __syncthreads(); #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { float sum = 0; #pragma unroll for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) { sum += s_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j]; } d_Dst[i * ROWS_BLOCKDIM_X] = sum; } } void convolutionRowsGPU( float *d_Dst, float *d_Src, int imageW, int imageH, float *c_Kernel ) { assert(ROWS_BLOCKDIM_X * ROWS_HALO_STEPS >= KERNEL_RADIUS); assert(imageW % (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X) == 0); assert(imageH % ROWS_BLOCKDIM_Y == 0); dim3 blocks(imageW / (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X), imageH / ROWS_BLOCKDIM_Y); dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y); convolutionRowsKernel<<<blocks, threads>>>( d_Dst, d_Src, imageW, imageH, imageW, c_Kernel ); getLastCudaError("convolutionRowsKernel() execution failed\n"); } //////////////////////////////////////////////////////////////////////////////// // Column convolution filter //////////////////////////////////////////////////////////////////////////////// #define COLUMNS_BLOCKDIM_X 16 #define COLUMNS_BLOCKDIM_Y 8 #define COLUMNS_RESULT_STEPS 8 #define COLUMNS_HALO_STEPS 1 __global__ void convolutionColumnsKernel( float *d_Dst, float *d_Src, int imageW, int imageH, int pitch, float *c_Kernel ) { __shared__ float s_Data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1]; __shared__ float s_Kernel[KERNEL_LENGTH * sizeof(float)]; for(int i = 0; i<KERNEL_LENGTH * sizeof(float);i++){ s_Kernel[i] =c_Kernel[i]; } //Offset to the upper halo edge const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x; const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y; d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; //Main data #pragma unroll for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * pitch]; } //Upper halo #pragma unroll for (int i = 0; i < COLUMNS_HALO_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; } //Lower halo #pragma unroll for (int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y]= (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? 
d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; } //Compute and store results __syncthreads(); #pragma unroll for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) { float sum = 0; #pragma unroll for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) { sum += s_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j]; } d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch] = sum; } } void convolutionColumnsGPU( float *d_Dst, float *d_Src, int imageW, int imageH, float *c_Kernel ) { assert(COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS >= KERNEL_RADIUS); assert(imageW % COLUMNS_BLOCKDIM_X == 0); assert(imageH % (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y) == 0); dim3 blocks(imageW / COLUMNS_BLOCKDIM_X, imageH / (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y)); dim3 threads(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y); convolutionColumnsKernel<<<blocks, threads>>>( d_Dst, d_Src, imageW, imageH, imageW, c_Kernel ); getLastCudaError("convolutionColumnsKernel() execution failed\n"); } void convolutionRowCPU( float *h_Dst, float *h_Src, float *h_Kernel, int imageW, int imageH, int kernelR ) { for (int y = 0; y < imageH; y++) for (int x = 0; x < imageW; x++) { float sum = 0; for (int k = -kernelR; k <= kernelR; k++) { int d = x + k; if (d >= 0 && d < imageW) sum += h_Src[y * imageW + d] * h_Kernel[kernelR - k]; } h_Dst[y * imageW + x] = sum; } } //////////////////////////////////////////////////////////////////////////////// // Reference column convolution filter //////////////////////////////////////////////////////////////////////////////// void convolutionColumnCPU( float *h_Dst, float *h_Src, float *h_Kernel, int imageW, int imageH, int kernelR ) { for (int y = 0; y < imageH; y++) for (int x = 0; x < imageW; x++) { float sum = 0; for (int k = -kernelR; k <= kernelR; k++) { int d = y + k; if (d >= 0 && d < imageH) sum += h_Src[d * imageW + x] * h_Kernel[kernelR - k]; } h_Dst[y * imageW + x] = sum; } } //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { // start logs printf("[%s] - Starting...\n", argv[0]); float *h_Kernel, *h_Input, *h_Buffer, *h_OutputCPU, *h_OutputGPU; float *d_Input, *d_Output, *d_Buffer, *c_Kernel; const int imageW = 3072; const int imageH = 3072; const int iterations = 16; struct timespec t1,t2; //Use command-line specified CUDA device, otherwise use device with highest Gflops/s //findCudaDevice(argc, (const char **)argv); printf("Image Width x Height = %i x %i\n\n", imageW, imageH); printf("Allocating and initializing host arrays...\n"); h_Kernel = (float *)malloc(KERNEL_LENGTH * sizeof(float)); h_Input = (float *)malloc(imageW * imageH * sizeof(float)); h_Buffer = (float *)malloc(imageW * imageH * sizeof(float)); h_OutputCPU = (float *)malloc(imageW * imageH * sizeof(float)); h_OutputGPU = (float *)malloc(imageW * imageH * sizeof(float)); srand(200); for (unsigned int i = 0; i < KERNEL_LENGTH; i++) { h_Kernel[i] = (float)(rand() % 16); } for (unsigned i = 0; i < imageW * imageH; i++) { h_Input[i] = (float)(rand() % 16); } printf("Allocating and initializing CUDA arrays...\n"); checkCudaErrors(cudaMalloc((void **)&d_Input, imageW * imageH * sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_Output, imageW * imageH * sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_Buffer , imageW * imageH * sizeof(float))); cudaMalloc((void **)&c_Kernel, KERNEL_LENGTH*sizeof(float)); 
cudaMemcpy(c_Kernel, h_Kernel, KERNEL_LENGTH * sizeof(float),cudaMemcpyHostToDevice); // setConvolutionKernel(h_Kernel); checkCudaErrors(cudaMemcpy(d_Input, h_Input, imageW * imageH * sizeof(float), cudaMemcpyHostToDevice)); printf("Running GPU convolution (%u identical iterations)...\n\n", iterations); for (int i = -1; i < iterations; i++) { //i == -1 -- warmup iteration if (i == 0) { checkCudaErrors(cudaDeviceSynchronize()); clock_gettime(CLOCK_MONOTONIC,&t1); } convolutionRowsGPU( d_Buffer, d_Input, imageW, imageH, c_Kernel ); convolutionColumnsGPU( d_Output, d_Buffer, imageW, imageH, c_Kernel ); } checkCudaErrors(cudaDeviceSynchronize()); clock_gettime(CLOCK_MONOTONIC,&t2); double gpuTime = ((t2.tv_sec-t1.tv_sec)+ (t2.tv_nsec-t1.tv_nsec)/1.e9)/ (double)iterations; printf("convolutionSeparable, Throughput = %.4f MPixels/sec, Time = %.5f s, Size = %u Pixels, NumDevsUsed = %i, Workgroup = %u\n", (1.0e-6 * (double)(imageW * imageH)/ gpuTime), gpuTime, (imageW * imageH), 1, 0); printf("\nReading back GPU results...\n\n"); checkCudaErrors(cudaMemcpy(h_OutputGPU, d_Output, imageW * imageH * sizeof(float), cudaMemcpyDeviceToHost)); printf("Checking the results...\n"); printf(" ...running convolutionRowCPU()\n"); convolutionRowCPU( h_Buffer, h_Input, h_Kernel, imageW, imageH, KERNEL_RADIUS ); printf(" ...running convolutionColumnCPU()\n"); convolutionColumnCPU( h_OutputCPU, h_Buffer, h_Kernel, imageW, imageH, KERNEL_RADIUS ); printf(" ...comparing the results\n"); double sum = 0, delta = 0; for (unsigned i = 0; i < imageW * imageH; i++) { delta += (h_OutputGPU[i] - h_OutputCPU[i]) * (h_OutputGPU[i] - h_OutputCPU[i]); sum += h_OutputCPU[i] * h_OutputCPU[i]; } double L2norm = sqrt(delta / sum); printf(" ...Relative L2 norm: %E\n\n", L2norm); printf("Shutting down...\n"); checkCudaErrors(cudaFree(d_Buffer)); checkCudaErrors(cudaFree(d_Output)); checkCudaErrors(cudaFree(d_Input)); free(h_OutputGPU); free(h_OutputCPU); free(h_Buffer); free(h_Input); free(h_Kernel); cudaDeviceReset(); if (L2norm > 1e-6) { printf("Test failed!\n"); exit(EXIT_FAILURE); } printf("Test passed\n"); exit(EXIT_SUCCESS); }
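Both versions of this sample stage the 17 filter taps through a global-memory array that every block copies into shared memory; the commented-out setConvolutionKernel() preserves the __constant__-memory path of the original SDK sample. A minimal sketch of that variant follows, with the array renamed c_KernelConst so it does not collide with the kernels' c_Kernel parameter (the rename is an assumption, not something the file defines):

// Sketch only: __constant__-memory storage for the filter taps.
__constant__ float c_KernelConst[KERNEL_LENGTH];

void setConvolutionKernelConst(const float *h_Kernel)
{
    // Copy the taps once; kernels would then read c_KernelConst[KERNEL_RADIUS - j]
    // directly instead of staging the taps in shared memory per block.
    cudaMemcpyToSymbol(c_KernelConst, h_Kernel, KERNEL_LENGTH * sizeof(float));
}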
14c8f70e14763faf447ea5512788ef55c25c9949.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <ctime> #include <stdlib.h> #include <iostream> __constant__ unsigned long long globalN[1]; __global__ void kernel_isPerfectNumber(bool *arr); __device__ bool isPerfectNumber(unsigned long long number); bool isPerfectNumber_(unsigned long long number); void host_isPerfectNumber(bool *arr, unsigned long long size); hipError_t CUDA_get_perfect_numbers(bool *arr, unsigned long long threads, unsigned long long blocks, unsigned long long N); int main() { unsigned long long threads = 512; unsigned long long blocks = 4; unsigned long long size = threads * blocks; unsigned long long N = size; bool *array_cpu = (bool *)malloc(size * sizeof(bool)); bool *array_gpu = (bool *)malloc(size * sizeof(bool)); unsigned int start_time; unsigned int end_time; unsigned int search_time; start_time = clock(); host_isPerfectNumber(array_cpu, N); end_time = clock(); search_time = end_time - start_time; std::cout << search_time / 1000.0 << std::endl; start_time = clock(); CUDA_get_perfect_numbers(array_gpu, threads, blocks, N); end_time = clock(); search_time = end_time - start_time; std::cout << search_time / 1000.0 << std::endl; for (unsigned long long i = 0; i < N; i++) { if (array_gpu[i]) std::cout << i << std::endl; } return 0; } //CPU void host_isPerfectNumber(bool *arr, unsigned long long size) { for (unsigned long long i = 0; i < size; i++) { if (isPerfectNumber_(i)) arr[i] = true; else arr[i] = false; } } bool isPerfectNumber_(unsigned long long number) { unsigned long long i = 1, sum = 0; while (i < number) { if (number%i == 0) sum = sum + i; i++; } if (sum == number) return true; else return false; } //GPU hipError_t CUDA_get_perfect_numbers(bool *arr, unsigned long long threads, unsigned long long blocks, unsigned long long N) { unsigned long long size = threads * blocks; bool *dev_arr = nullptr; hipError_t cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! 
Do you have a CUDA-capable GPU installed?"); goto Error; } cudaStatus = hipMalloc((void**)&dev_arr, size * sizeof(bool)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMemcpyToSymbol(globalN, &N, sizeof(unsigned long long)); if (cudaStatus != hipSuccess) { fprintf(stderr, "Memcpy failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } hipLaunchKernelGGL(( kernel_isPerfectNumber) , dim3(blocks), dim3(threads) , 0, 0, dev_arr); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } cudaStatus = hipMemcpy(arr, dev_arr, size * sizeof(bool), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dev_arr); return cudaStatus; } __global__ void kernel_isPerfectNumber(bool *arr) { unsigned long long i = threadIdx.x + blockDim.x*blockIdx.x; if (i >= globalN[0]) { arr[i] = false; return; } if (isPerfectNumber(i)) arr[i] = true; else arr[i] = false; } __device__ bool isPerfectNumber(unsigned long long number) { unsigned long long i = 1, sum = 0; while (i < number) { if (number%i == 0) sum = sum + i; i++; } if (sum == number) return true; else return false; }
14c8f70e14763faf447ea5512788ef55c25c9949.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <ctime> #include <stdlib.h> #include <iostream> __constant__ unsigned long long globalN[1]; __global__ void kernel_isPerfectNumber(bool *arr); __device__ bool isPerfectNumber(unsigned long long number); bool isPerfectNumber_(unsigned long long number); void host_isPerfectNumber(bool *arr, unsigned long long size); cudaError_t CUDA_get_perfect_numbers(bool *arr, unsigned long long threads, unsigned long long blocks, unsigned long long N); int main() { unsigned long long threads = 512; unsigned long long blocks = 4; unsigned long long size = threads * blocks; unsigned long long N = size; bool *array_cpu = (bool *)malloc(size * sizeof(bool)); bool *array_gpu = (bool *)malloc(size * sizeof(bool)); unsigned int start_time; unsigned int end_time; unsigned int search_time; start_time = clock(); host_isPerfectNumber(array_cpu, N); end_time = clock(); search_time = end_time - start_time; std::cout << search_time / 1000.0 << std::endl; start_time = clock(); CUDA_get_perfect_numbers(array_gpu, threads, blocks, N); end_time = clock(); search_time = end_time - start_time; std::cout << search_time / 1000.0 << std::endl; for (unsigned long long i = 0; i < N; i++) { if (array_gpu[i]) std::cout << i << std::endl; } return 0; } //CPU void host_isPerfectNumber(bool *arr, unsigned long long size) { for (unsigned long long i = 0; i < size; i++) { if (isPerfectNumber_(i)) arr[i] = true; else arr[i] = false; } } bool isPerfectNumber_(unsigned long long number) { unsigned long long i = 1, sum = 0; while (i < number) { if (number%i == 0) sum = sum + i; i++; } if (sum == number) return true; else return false; } //GPU cudaError_t CUDA_get_perfect_numbers(bool *arr, unsigned long long threads, unsigned long long blocks, unsigned long long N) { unsigned long long size = threads * blocks; bool *dev_arr = nullptr; cudaError_t cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_arr, size * sizeof(bool)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMemcpyToSymbol(globalN, &N, sizeof(unsigned long long)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "Memcpy failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } kernel_isPerfectNumber <<<blocks, threads >>> (dev_arr); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } cudaStatus = cudaMemcpy(arr, dev_arr, size * sizeof(bool), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_arr); return cudaStatus; } __global__ void kernel_isPerfectNumber(bool *arr) { unsigned long long i = threadIdx.x + blockDim.x*blockIdx.x; if (i >= globalN[0]) { arr[i] = false; return; } if (isPerfectNumber(i)) arr[i] = true; else arr[i] = false; } __device__ bool isPerfectNumber(unsigned long long number) { unsigned long long i = 1, sum = 0; while (i < number) { if (number%i == 0) sum = sum + i; i++; } if (sum == number) return true; else return false; }
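Both the host and device checks above sum divisors with a full 1..n-1 scan, so each candidate costs O(n). Purely as an illustration (this helper does not exist in the file), a divisor sum bounded by sqrt(n) gives the same true/false answers for n >= 1:

// Sketch: O(sqrt(n)) proper-divisor sum as an alternative to the linear scan.
__host__ __device__ bool isPerfectNumberFast(unsigned long long n)
{
    if (n < 2) return false;
    unsigned long long sum = 1;                      // 1 divides every n > 1
    for (unsigned long long d = 2; d * d <= n; ++d) {
        if (n % d == 0) {
            sum += d;
            unsigned long long q = n / d;
            if (q != d) sum += q;                    // count the paired divisor once
        }
    }
    return sum == n;
}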
4938908dc764008574d0bccae673974479b636bf.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "cuda_cluster.h"

__global__ void clust_int(double *clusterX, double *clusterY, double *clusterZ, double *d_tt, double *d_nodeX,
                          double *d_nodeY, double *d_nodeZ, double h_xb, double h_yb, double h_zb, double x0,
                          double y0, double z0, int interpDegreeLim, int interpolationDegree,
                          int interpolationPointsPerCluster, int startingIndexInClustersArray){

    int i = threadIdx.x + blockDim.x * blockIdx.x;

    // Chebyshev nodes mapped into the cluster box [x0, x0+h_xb] x [y0, y0+h_yb] x [z0, z0+h_zb]
    for (int o = i; o < interpDegreeLim; o++){
        d_tt[o]    = cos(o * M_PI / interpolationDegree);
        d_nodeX[o] = x0 + (d_tt[o] + 1.0) / 2.0 * h_xb;
        d_nodeY[o] = y0 + (d_tt[o] + 1.0) / 2.0 * h_yb;
        d_nodeZ[o] = z0 + (d_tt[o] + 1.0) / 2.0 * h_zb;
    }

    // All node values must be written before any thread reads them below;
    // cluster_interp launches a single block, so __syncthreads() is sufficient.
    __syncthreads();

    for (int j = i; j < interpolationPointsPerCluster; j++){
        int k1 = j % (interpolationDegree + 1);
        int kk = (j - k1) / (interpolationDegree + 1);
        int k2 = kk % (interpolationDegree + 1);
        kk = kk - k2;
        int k3 = kk / (interpolationDegree + 1);
        clusterX[startingIndexInClustersArray + j] = d_nodeX[k1];
        clusterY[startingIndexInClustersArray + j] = d_nodeY[k2];
        clusterZ[startingIndexInClustersArray + j] = d_nodeZ[k3];
    }
}

void cluster_interp(double *clusterX, double *clusterY, double *clusterZ,
                    double h_xb, double h_yb, double h_zb, double x0,
                    double y0, double z0, int interpDegreeLim, int interpolationDegree,
                    int interpolationPointsPerCluster, int startingIndexInClustersArray){

    double *d_tt, *d_nodeX, *d_nodeY, *d_nodeZ, *d_clusterX, *d_clusterY, *d_clusterZ;

    // The kernel writes up to index startingIndexInClustersArray + interpolationPointsPerCluster - 1,
    // so the cluster buffers and their copies must cover that range.
    size_t clusterBytes = (size_t)(startingIndexInClustersArray + interpolationPointsPerCluster) * sizeof(double);

    hipMalloc((void **) &d_tt,    interpDegreeLim * sizeof(double));
    hipMalloc((void **) &d_nodeX, interpDegreeLim * sizeof(double));
    hipMalloc((void **) &d_nodeY, interpDegreeLim * sizeof(double));
    hipMalloc((void **) &d_nodeZ, interpDegreeLim * sizeof(double));
    hipMalloc((void **) &d_clusterX, clusterBytes);
    hipMalloc((void **) &d_clusterY, clusterBytes);
    hipMalloc((void **) &d_clusterZ, clusterBytes);

    // Zero-initialize the device scratch arrays.
    hipMemset(d_tt,    0, interpDegreeLim * sizeof(double));
    hipMemset(d_nodeX, 0, interpDegreeLim * sizeof(double));
    hipMemset(d_nodeY, 0, interpDegreeLim * sizeof(double));
    hipMemset(d_nodeZ, 0, interpDegreeLim * sizeof(double));

    hipMemcpy(d_clusterX, clusterX, clusterBytes, hipMemcpyHostToDevice);
    hipMemcpy(d_clusterY, clusterY, clusterBytes, hipMemcpyHostToDevice);
    hipMemcpy(d_clusterZ, clusterZ, clusterBytes, hipMemcpyHostToDevice);

    hipLaunchKernelGGL(clust_int, dim3(1), dim3(32), 0, 0,
                       d_clusterX, d_clusterY, d_clusterZ, d_tt, d_nodeX, d_nodeY, d_nodeZ,
                       h_xb, h_yb, h_zb, x0, y0, z0,
                       interpDegreeLim, interpolationDegree, interpolationPointsPerCluster,
                       startingIndexInClustersArray);
    hipDeviceSynchronize();

    hipMemcpy(clusterX, d_clusterX, clusterBytes, hipMemcpyDeviceToHost);
    hipMemcpy(clusterY, d_clusterY, clusterBytes, hipMemcpyDeviceToHost);
    hipMemcpy(clusterZ, d_clusterZ, clusterBytes, hipMemcpyDeviceToHost);

    hipFree(d_tt);
    hipFree(d_nodeX);
    hipFree(d_nodeY);
    hipFree(d_nodeZ);
    hipFree(d_clusterX);
    hipFree(d_clusterY);
    hipFree(d_clusterZ);

    return;
}
4938908dc764008574d0bccae673974479b636bf.cu
#include <cuda.h>
#include <cuda_runtime.h>
#include "cuda_cluster.h"

__global__ void clust_int(double *clusterX, double *clusterY, double *clusterZ, double *d_tt, double *d_nodeX,
                          double *d_nodeY, double *d_nodeZ, double h_xb, double h_yb, double h_zb, double x0,
                          double y0, double z0, int interpDegreeLim, int interpolationDegree,
                          int interpolationPointsPerCluster, int startingIndexInClustersArray){

    int i = threadIdx.x + blockDim.x * blockIdx.x;

    // Chebyshev nodes mapped into the cluster box [x0, x0+h_xb] x [y0, y0+h_yb] x [z0, z0+h_zb]
    for (int o = i; o < interpDegreeLim; o++){
        d_tt[o]    = cos(o * M_PI / interpolationDegree);
        d_nodeX[o] = x0 + (d_tt[o] + 1.0) / 2.0 * h_xb;
        d_nodeY[o] = y0 + (d_tt[o] + 1.0) / 2.0 * h_yb;
        d_nodeZ[o] = z0 + (d_tt[o] + 1.0) / 2.0 * h_zb;
    }

    // All node values must be written before any thread reads them below;
    // cluster_interp launches a single block, so __syncthreads() is sufficient.
    __syncthreads();

    for (int j = i; j < interpolationPointsPerCluster; j++){
        int k1 = j % (interpolationDegree + 1);
        int kk = (j - k1) / (interpolationDegree + 1);
        int k2 = kk % (interpolationDegree + 1);
        kk = kk - k2;
        int k3 = kk / (interpolationDegree + 1);
        clusterX[startingIndexInClustersArray + j] = d_nodeX[k1];
        clusterY[startingIndexInClustersArray + j] = d_nodeY[k2];
        clusterZ[startingIndexInClustersArray + j] = d_nodeZ[k3];
    }
}

void cluster_interp(double *clusterX, double *clusterY, double *clusterZ,
                    double h_xb, double h_yb, double h_zb, double x0,
                    double y0, double z0, int interpDegreeLim, int interpolationDegree,
                    int interpolationPointsPerCluster, int startingIndexInClustersArray){

    double *d_tt, *d_nodeX, *d_nodeY, *d_nodeZ, *d_clusterX, *d_clusterY, *d_clusterZ;

    // The kernel writes up to index startingIndexInClustersArray + interpolationPointsPerCluster - 1,
    // so the cluster buffers and their copies must cover that range.
    size_t clusterBytes = (size_t)(startingIndexInClustersArray + interpolationPointsPerCluster) * sizeof(double);

    cudaMalloc((void **) &d_tt,    interpDegreeLim * sizeof(double));
    cudaMalloc((void **) &d_nodeX, interpDegreeLim * sizeof(double));
    cudaMalloc((void **) &d_nodeY, interpDegreeLim * sizeof(double));
    cudaMalloc((void **) &d_nodeZ, interpDegreeLim * sizeof(double));
    cudaMalloc((void **) &d_clusterX, clusterBytes);
    cudaMalloc((void **) &d_clusterY, clusterBytes);
    cudaMalloc((void **) &d_clusterZ, clusterBytes);

    // Zero-initialize the device scratch arrays.
    cudaMemset(d_tt,    0, interpDegreeLim * sizeof(double));
    cudaMemset(d_nodeX, 0, interpDegreeLim * sizeof(double));
    cudaMemset(d_nodeY, 0, interpDegreeLim * sizeof(double));
    cudaMemset(d_nodeZ, 0, interpDegreeLim * sizeof(double));

    cudaMemcpy(d_clusterX, clusterX, clusterBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_clusterY, clusterY, clusterBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_clusterZ, clusterZ, clusterBytes, cudaMemcpyHostToDevice);

    clust_int<<<1, 32>>>(d_clusterX, d_clusterY, d_clusterZ, d_tt, d_nodeX, d_nodeY, d_nodeZ,
                         h_xb, h_yb, h_zb, x0, y0, z0,
                         interpDegreeLim, interpolationDegree, interpolationPointsPerCluster,
                         startingIndexInClustersArray);
    cudaDeviceSynchronize();

    cudaMemcpy(clusterX, d_clusterX, clusterBytes, cudaMemcpyDeviceToHost);
    cudaMemcpy(clusterY, d_clusterY, clusterBytes, cudaMemcpyDeviceToHost);
    cudaMemcpy(clusterZ, d_clusterZ, clusterBytes, cudaMemcpyDeviceToHost);

    cudaFree(d_tt);
    cudaFree(d_nodeX);
    cudaFree(d_nodeY);
    cudaFree(d_nodeZ);
    cudaFree(d_clusterX);
    cudaFree(d_clusterY);
    cudaFree(d_clusterZ);

    return;
}
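A hypothetical call site for cluster_interp, assuming cuda_cluster.h declares it; every numeric value here is invented for illustration (one unit-cube cluster, degree-4 Chebyshev grid, hence (4+1)^3 = 125 interpolation points):

#include <vector>
#include "cuda_cluster.h"

int main() {
    const int interpolationDegree = 4;
    const int interpDegreeLim = interpolationDegree + 1;
    const int pointsPerCluster = interpDegreeLim * interpDegreeLim * interpDegreeLim;

    std::vector<double> cx(pointsPerCluster), cy(pointsPerCluster), cz(pointsPerCluster);

    cluster_interp(cx.data(), cy.data(), cz.data(),
                   1.0, 1.0, 1.0,      // h_xb, h_yb, h_zb: box edge lengths
                   0.0, 0.0, 0.0,      // x0, y0, z0: box lower corner
                   interpDegreeLim, interpolationDegree,
                   pointsPerCluster, 0 /* startingIndexInClustersArray */);
    return 0;
}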
084e426ab43c0986e12f58131c7cb750c3345e15.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "normal_eqs_flow_multicam_GPU.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *d_CO = NULL; hipMalloc(&d_CO, XSIZE*YSIZE); float2 *d_flow_compact = NULL; hipMalloc(&d_flow_compact, XSIZE*YSIZE); float *d_Zbuffer_flow_compact = NULL; hipMalloc(&d_Zbuffer_flow_compact, XSIZE*YSIZE); int *d_ind_flow_Zbuffer = NULL; hipMalloc(&d_ind_flow_Zbuffer, XSIZE*YSIZE); const float *d_focal_length = NULL; hipMalloc(&d_focal_length, XSIZE*YSIZE); const float *d_nodal_point_x = NULL; hipMalloc(&d_nodal_point_x, XSIZE*YSIZE); const float *d_nodal_point_y = NULL; hipMalloc(&d_nodal_point_y, XSIZE*YSIZE); const int *d_n_rows = NULL; hipMalloc(&d_n_rows, XSIZE*YSIZE); const int *d_n_cols = NULL; hipMalloc(&d_n_cols, XSIZE*YSIZE); const int *d_n_values_flow = NULL; hipMalloc(&d_n_values_flow, XSIZE*YSIZE); const int *d_start_ind_flow = NULL; hipMalloc(&d_start_ind_flow, XSIZE*YSIZE); const int *d_pixel_ind_offset = NULL; hipMalloc(&d_pixel_ind_offset, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( normal_eqs_flow_multicam_GPU), dim3(gridBlock),dim3(threadBlock), 0, 0, d_CO,d_flow_compact,d_Zbuffer_flow_compact,d_ind_flow_Zbuffer,d_focal_length,d_nodal_point_x,d_nodal_point_y,d_n_rows,d_n_cols,d_n_values_flow,d_start_ind_flow,d_pixel_ind_offset); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( normal_eqs_flow_multicam_GPU), dim3(gridBlock),dim3(threadBlock), 0, 0, d_CO,d_flow_compact,d_Zbuffer_flow_compact,d_ind_flow_Zbuffer,d_focal_length,d_nodal_point_x,d_nodal_point_y,d_n_rows,d_n_cols,d_n_values_flow,d_start_ind_flow,d_pixel_ind_offset); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( normal_eqs_flow_multicam_GPU), dim3(gridBlock),dim3(threadBlock), 0, 0, d_CO,d_flow_compact,d_Zbuffer_flow_compact,d_ind_flow_Zbuffer,d_focal_length,d_nodal_point_x,d_nodal_point_y,d_n_rows,d_n_cols,d_n_values_flow,d_start_ind_flow,d_pixel_ind_offset); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
084e426ab43c0986e12f58131c7cb750c3345e15.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "normal_eqs_flow_multicam_GPU.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *d_CO = NULL; cudaMalloc(&d_CO, XSIZE*YSIZE); float2 *d_flow_compact = NULL; cudaMalloc(&d_flow_compact, XSIZE*YSIZE); float *d_Zbuffer_flow_compact = NULL; cudaMalloc(&d_Zbuffer_flow_compact, XSIZE*YSIZE); int *d_ind_flow_Zbuffer = NULL; cudaMalloc(&d_ind_flow_Zbuffer, XSIZE*YSIZE); const float *d_focal_length = NULL; cudaMalloc(&d_focal_length, XSIZE*YSIZE); const float *d_nodal_point_x = NULL; cudaMalloc(&d_nodal_point_x, XSIZE*YSIZE); const float *d_nodal_point_y = NULL; cudaMalloc(&d_nodal_point_y, XSIZE*YSIZE); const int *d_n_rows = NULL; cudaMalloc(&d_n_rows, XSIZE*YSIZE); const int *d_n_cols = NULL; cudaMalloc(&d_n_cols, XSIZE*YSIZE); const int *d_n_values_flow = NULL; cudaMalloc(&d_n_values_flow, XSIZE*YSIZE); const int *d_start_ind_flow = NULL; cudaMalloc(&d_start_ind_flow, XSIZE*YSIZE); const int *d_pixel_ind_offset = NULL; cudaMalloc(&d_pixel_ind_offset, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); normal_eqs_flow_multicam_GPU<<<gridBlock,threadBlock>>>(d_CO,d_flow_compact,d_Zbuffer_flow_compact,d_ind_flow_Zbuffer,d_focal_length,d_nodal_point_x,d_nodal_point_y,d_n_rows,d_n_cols,d_n_values_flow,d_start_ind_flow,d_pixel_ind_offset); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { normal_eqs_flow_multicam_GPU<<<gridBlock,threadBlock>>>(d_CO,d_flow_compact,d_Zbuffer_flow_compact,d_ind_flow_Zbuffer,d_focal_length,d_nodal_point_x,d_nodal_point_y,d_n_rows,d_n_cols,d_n_values_flow,d_start_ind_flow,d_pixel_ind_offset); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { normal_eqs_flow_multicam_GPU<<<gridBlock,threadBlock>>>(d_CO,d_flow_compact,d_Zbuffer_flow_compact,d_ind_flow_Zbuffer,d_focal_length,d_nodal_point_x,d_nodal_point_y,d_n_rows,d_n_cols,d_n_values_flow,d_start_ind_flow,d_pixel_ind_offset); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
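The harness reads steady_clock::now() immediately after issuing the 1000 asynchronous launches, with no synchronization before the end timestamp, so the reported time reflects how quickly launches are enqueued rather than how long the kernels run. A sketch of the same loop timed with CUDA events, which wait for completion (the evStart/evStop names are invented):

// Sketch only: event-based timing of the benchmark loop.
cudaEvent_t evStart, evStop;
cudaEventCreate(&evStart);
cudaEventCreate(&evStop);

cudaEventRecord(evStart);
for (int loop_counter = 0; loop_counter < 1000; ++loop_counter) {
    normal_eqs_flow_multicam_GPU<<<gridBlock, threadBlock>>>(d_CO, d_flow_compact, d_Zbuffer_flow_compact,
        d_ind_flow_Zbuffer, d_focal_length, d_nodal_point_x, d_nodal_point_y,
        d_n_rows, d_n_cols, d_n_values_flow, d_start_ind_flow, d_pixel_ind_offset);
}
cudaEventRecord(evStop);
cudaEventSynchronize(evStop);                 // blocks until all 1000 launches have finished

float ms = 0.0f;
cudaEventElapsedTime(&ms, evStart, evStop);   // elapsed GPU time in milliseconds
cudaEventDestroy(evStart);
cudaEventDestroy(evStop);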
2b3d6782f07838040e9cca5163240bc9763b5af1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "lab3.h" #include <cstdio> __device__ __host__ int CeilDiv( int a, int b ) { return (a-1)/b + 1; } __device__ __host__ int CeilAlign( int a, int b ) { return CeilDiv(a, b) * b; } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// The kernel of Poisson image cloning /// __global__ void PoissonImageCloningKernel( const float *background, const float *target, const float *mask, float *output, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox, const bool status ) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; const int curt = wt*yt+xt; if ( (yt % 2) ^ (xt % 2) == status && 0 < yt && yt < ht-1 && 0 < xt && xt < wt-1 && mask[curt] > 127.0f ) { const int yb = oy+yt, xb = ox+xt; const int curb = wb*yb+xb; if ( 0 < yb && yb < hb-1 && 0 < xb && xb < wb-1 ) { output[curb*3+0] = target[curt*3+0] +(output[(curb-wb)*3+0] + output[(curb-1)*3+0] + output[(curb+1)*3+0] + output[(curb+wb)*3+0] - target[(curt-wt)*3+0] - target[(curt-1)*3+0] - target[(curt+1)*3+0] - target[(curt+wt)*3+0]) / 4; output[curb*3+1] = target[curt*3+1] +(output[(curb-wb)*3+1] + output[(curb-1)*3+1] + output[(curb+1)*3+1] + output[(curb+wb)*3+1] - target[(curt-wt)*3+1] - target[(curt-1)*3+1] - target[(curt+1)*3+1] - target[(curt+wt)*3+1]) / 4; output[curb*3+2] = target[curt*3+2] +(output[(curb-wb)*3+2] + output[(curb-1)*3+2] + output[(curb+1)*3+2] + output[(curb+wb)*3+2] - target[(curt-wt)*3+2] - target[(curt-1)*3+2] - target[(curt+1)*3+2] - target[(curt+wt)*3+2]) / 4; } } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// The Poisson image cloning /// void PoissonImageCloning( const float *background, const float *target, const float *mask, float *output, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox ) { hipMemcpy(output, background, wb*hb*sizeof(float)*3, hipMemcpyDeviceToDevice); for ( auto i = 0; i < 20000; ++i ) { hipLaunchKernelGGL(( PoissonImageCloningKernel), dim3(dim3(CeilDiv(wt,32), CeilDiv(ht,16))), dim3(dim3(32,16)), 0, 0, background, target, mask, output, wb, hb, wt, ht, oy, ox, true ); hipLaunchKernelGGL(( PoissonImageCloningKernel), dim3(dim3(CeilDiv(wt,32), CeilDiv(ht,16))), dim3(dim3(32,16)), 0, 0, background, target, mask, output, wb, hb, wt, ht, oy, ox, false ); } }
2b3d6782f07838040e9cca5163240bc9763b5af1.cu
#include "lab3.h" #include <cstdio> __device__ __host__ int CeilDiv( int a, int b ) { return (a-1)/b + 1; } __device__ __host__ int CeilAlign( int a, int b ) { return CeilDiv(a, b) * b; } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// The kernel of Poisson image cloning /// __global__ void PoissonImageCloningKernel( const float *background, const float *target, const float *mask, float *output, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox, const bool status ) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; const int curt = wt*yt+xt; if ( (yt % 2) ^ (xt % 2) == status && 0 < yt && yt < ht-1 && 0 < xt && xt < wt-1 && mask[curt] > 127.0f ) { const int yb = oy+yt, xb = ox+xt; const int curb = wb*yb+xb; if ( 0 < yb && yb < hb-1 && 0 < xb && xb < wb-1 ) { output[curb*3+0] = target[curt*3+0] +(output[(curb-wb)*3+0] + output[(curb-1)*3+0] + output[(curb+1)*3+0] + output[(curb+wb)*3+0] - target[(curt-wt)*3+0] - target[(curt-1)*3+0] - target[(curt+1)*3+0] - target[(curt+wt)*3+0]) / 4; output[curb*3+1] = target[curt*3+1] +(output[(curb-wb)*3+1] + output[(curb-1)*3+1] + output[(curb+1)*3+1] + output[(curb+wb)*3+1] - target[(curt-wt)*3+1] - target[(curt-1)*3+1] - target[(curt+1)*3+1] - target[(curt+wt)*3+1]) / 4; output[curb*3+2] = target[curt*3+2] +(output[(curb-wb)*3+2] + output[(curb-1)*3+2] + output[(curb+1)*3+2] + output[(curb+wb)*3+2] - target[(curt-wt)*3+2] - target[(curt-1)*3+2] - target[(curt+1)*3+2] - target[(curt+wt)*3+2]) / 4; } } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// The Poisson image cloning /// void PoissonImageCloning( const float *background, const float *target, const float *mask, float *output, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox ) { cudaMemcpy(output, background, wb*hb*sizeof(float)*3, cudaMemcpyDeviceToDevice); for ( auto i = 0; i < 20000; ++i ) { PoissonImageCloningKernel<<<dim3(CeilDiv(wt,32), CeilDiv(ht,16)), dim3(32,16)>>>( background, target, mask, output, wb, hb, wt, ht, oy, ox, true ); PoissonImageCloningKernel<<<dim3(CeilDiv(wt,32), CeilDiv(ht,16)), dim3(32,16)>>>( background, target, mask, output, wb, hb, wt, ht, oy, ox, false ); } }
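In the kernel's gating test, C++ binds == tighter than ^, so (yt % 2) ^ (xt % 2) == status is parsed as (yt % 2) ^ ((xt % 2) == status); for 0/1 parities and a bool flag this happens to select the same checkerboard, but the intent is easier to audit when the grouping is explicit. A purely illustrative helper, not present in the file:

// Sketch: red/black (checkerboard) selection with explicit grouping.
__device__ __forceinline__ bool onActiveColor(int yt, int xt, bool status)
{
    const bool oddParity = ((yt % 2) ^ (xt % 2)) != 0;   // true on one color of the checkerboard
    return oddParity == status;
}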
2ad7a0a15a6a50cbc1f173f197ae4e217d2df9f5.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "multiply_device.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *d_a = NULL; hipMalloc(&d_a, XSIZE*YSIZE); double *d_b = NULL; hipMalloc(&d_b, XSIZE*YSIZE); int dim = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( multiply_device), dim3(gridBlock),dim3(threadBlock), 0, 0, d_a,d_b,dim); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( multiply_device), dim3(gridBlock),dim3(threadBlock), 0, 0, d_a,d_b,dim); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( multiply_device), dim3(gridBlock),dim3(threadBlock), 0, 0, d_a,d_b,dim); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
2ad7a0a15a6a50cbc1f173f197ae4e217d2df9f5.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "multiply_device.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *d_a = NULL; cudaMalloc(&d_a, XSIZE*YSIZE); double *d_b = NULL; cudaMalloc(&d_b, XSIZE*YSIZE); int dim = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); multiply_device<<<gridBlock,threadBlock>>>(d_a,d_b,dim); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { multiply_device<<<gridBlock,threadBlock>>>(d_a,d_b,dim); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { multiply_device<<<gridBlock,threadBlock>>>(d_a,d_b,dim); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
fe256e8de146e094ef4fc7588425db45d3cf7dda.hip
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/native/hip/SortingCommon.cuh> #include <ATen/hip/cub_definitions.cuh> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #else #include <ATen/ops/empty_like.h> #endif #include <ATen/hip/ThrustAllocator.h> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/sort.h> #include <thrust/unique.h> #include <thrust/device_ptr.h> #include <thrust/iterator/constant_iterator.h> namespace at { namespace native { void index_put_with_sort_kernel_thrust_helper(Tensor &linearIndex, Tensor &orig_indices, Tensor &sorted_indices, int64_t num_indices) { sorted_indices.copy_(linearIndex); const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); at::cuda::ThrustAllocator allocator; auto policy = thrust::hip::par(allocator).on(stream); using device_ptr = thrust::device_ptr<int64_t>; // Fill sortedOrigIndices with sequential indices const auto count_iter = thrust::counting_iterator<int64_t>(0); auto orig_data = device_ptr(orig_indices.data_ptr<int64_t>()); thrust::copy(policy, count_iter, count_iter + num_indices, orig_data); // Sort the inputs into sorted with the corresponding indices; we // don't need a stable or multidimensional sort, so just use Thrust // directly // Sort; a stable sort is not required // NB - not passing comparator causes thrust to use radix sort, and it hurts perf A LOT, at least for medium (few K) sized indices auto sorted_data = device_ptr(sorted_indices.data_ptr<int64_t>()); thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data, LTOp<int64_t>()); } #if !CUB_SUPPORTS_SCAN_BY_KEY() template<typename index_t> void embedding_dense_backward_cuda_scan(Tensor &sorted_indices, Tensor &count) { using device_ptr = thrust::device_ptr<index_t>; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); at::cuda::ThrustAllocator allocator; auto policy = thrust::hip::par(allocator).on(stream); auto num_indices = count.numel(); // Compute an increasing sequence per unique item in sortedIndices: // sorted: 2 5 5 5 7 7 8 9 9 // count: 1 1 2 3 1 2 1 1 2 auto sorted_data = device_ptr(sorted_indices.data_ptr<index_t>()); auto count_data = device_ptr(count.data_ptr<index_t>()); thrust::inclusive_scan_by_key( policy, sorted_data, sorted_data + num_indices, thrust::make_constant_iterator(1), count_data ); // Take the maximum of each count per unique key in reverse: // sorted: 2 5 5 5 7 7 8 9 9 // count: 1 3 3 3 2 2 1 2 2 thrust::inclusive_scan_by_key( policy, thrust::make_reverse_iterator(sorted_data + num_indices), thrust::make_reverse_iterator(sorted_data), thrust::make_reverse_iterator(count_data + num_indices), thrust::make_reverse_iterator(count_data + num_indices), thrust::equal_to<index_t>(), thrust::maximum<index_t>() ); } template void embedding_dense_backward_cuda_scan<int>(Tensor &sorted_indices, Tensor &count); template void embedding_dense_backward_cuda_scan<int64_t>(Tensor &sorted_indices, Tensor &count); #endif template<typename index_t> int64_t embedding_backward_cuda_kernel_unique_by_key(const Tensor &sorted_indices, Tensor &segment_offsets) { auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); at::cuda::ThrustAllocator allocator; auto policy = thrust::hip::par(allocator).on(stream); const ptrdiff_t numel = sorted_indices.numel(); auto sorted_indices_dev = thrust::device_ptr<index_t>(sorted_indices.data_ptr<index_t>()); auto dummy = 
at::empty_like(sorted_indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto dummy_dev = thrust::device_ptr<index_t>(dummy.data_ptr<index_t>()); auto ends = thrust::unique_by_key_copy( policy, sorted_indices_dev, sorted_indices_dev + numel, thrust::make_counting_iterator(0), dummy_dev, thrust::device_ptr<index_t>(segment_offsets.data_ptr<index_t>())); return thrust::get<0>(ends) - dummy_dev; } template int64_t embedding_backward_cuda_kernel_unique_by_key<int>(const Tensor &sorted_indices, Tensor &segment_offsets); template int64_t embedding_backward_cuda_kernel_unique_by_key<int64_t>(const Tensor &sorted_indices, Tensor &segment_offsets); }}
fe256e8de146e094ef4fc7588425db45d3cf7dda.cu
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/native/cuda/SortingCommon.cuh> #include <ATen/cuda/cub_definitions.cuh> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #else #include <ATen/ops/empty_like.h> #endif #include <ATen/cuda/ThrustAllocator.h> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/sort.h> #include <thrust/unique.h> #include <thrust/device_ptr.h> #include <thrust/iterator/constant_iterator.h> namespace at { namespace native { void index_put_with_sort_kernel_thrust_helper(Tensor &linearIndex, Tensor &orig_indices, Tensor &sorted_indices, int64_t num_indices) { sorted_indices.copy_(linearIndex); const cudaStream_t stream = at::cuda::getCurrentCUDAStream(); at::cuda::ThrustAllocator allocator; auto policy = thrust::cuda::par(allocator).on(stream); using device_ptr = thrust::device_ptr<int64_t>; // Fill sortedOrigIndices with sequential indices const auto count_iter = thrust::counting_iterator<int64_t>(0); auto orig_data = device_ptr(orig_indices.data_ptr<int64_t>()); thrust::copy(policy, count_iter, count_iter + num_indices, orig_data); // Sort the inputs into sorted with the corresponding indices; we // don't need a stable or multidimensional sort, so just use Thrust // directly // Sort; a stable sort is not required // NB - not passing comparator causes thrust to use radix sort, and it hurts perf A LOT, at least for medium (few K) sized indices auto sorted_data = device_ptr(sorted_indices.data_ptr<int64_t>()); thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data, LTOp<int64_t>()); } #if !CUB_SUPPORTS_SCAN_BY_KEY() template<typename index_t> void embedding_dense_backward_cuda_scan(Tensor &sorted_indices, Tensor &count) { using device_ptr = thrust::device_ptr<index_t>; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); at::cuda::ThrustAllocator allocator; auto policy = thrust::cuda::par(allocator).on(stream); auto num_indices = count.numel(); // Compute an increasing sequence per unique item in sortedIndices: // sorted: 2 5 5 5 7 7 8 9 9 // count: 1 1 2 3 1 2 1 1 2 auto sorted_data = device_ptr(sorted_indices.data_ptr<index_t>()); auto count_data = device_ptr(count.data_ptr<index_t>()); thrust::inclusive_scan_by_key( policy, sorted_data, sorted_data + num_indices, thrust::make_constant_iterator(1), count_data ); // Take the maximum of each count per unique key in reverse: // sorted: 2 5 5 5 7 7 8 9 9 // count: 1 3 3 3 2 2 1 2 2 thrust::inclusive_scan_by_key( policy, thrust::make_reverse_iterator(sorted_data + num_indices), thrust::make_reverse_iterator(sorted_data), thrust::make_reverse_iterator(count_data + num_indices), thrust::make_reverse_iterator(count_data + num_indices), thrust::equal_to<index_t>(), thrust::maximum<index_t>() ); } template void embedding_dense_backward_cuda_scan<int>(Tensor &sorted_indices, Tensor &count); template void embedding_dense_backward_cuda_scan<int64_t>(Tensor &sorted_indices, Tensor &count); #endif template<typename index_t> int64_t embedding_backward_cuda_kernel_unique_by_key(const Tensor &sorted_indices, Tensor &segment_offsets) { auto stream = at::cuda::getCurrentCUDAStream(); at::cuda::ThrustAllocator allocator; auto policy = thrust::cuda::par(allocator).on(stream); const ptrdiff_t numel = sorted_indices.numel(); auto sorted_indices_dev = thrust::device_ptr<index_t>(sorted_indices.data_ptr<index_t>()); auto dummy = at::empty_like(sorted_indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto dummy_dev = 
thrust::device_ptr<index_t>(dummy.data_ptr<index_t>()); auto ends = thrust::unique_by_key_copy( policy, sorted_indices_dev, sorted_indices_dev + numel, thrust::make_counting_iterator(0), dummy_dev, thrust::device_ptr<index_t>(segment_offsets.data_ptr<index_t>())); return thrust::get<0>(ends) - dummy_dev; } template int64_t embedding_backward_cuda_kernel_unique_by_key<int>(const Tensor &sorted_indices, Tensor &segment_offsets); template int64_t embedding_backward_cuda_kernel_unique_by_key<int64_t>(const Tensor &sorted_indices, Tensor &segment_offsets); }}
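For a concrete picture of embedding_backward_cuda_kernel_unique_by_key (illustrative values, not taken from any test in this file): with sorted_indices = [2, 5, 5, 5, 7, 7, 8, 9, 9], thrust::unique_by_key_copy writes the position of the first element of each run into segment_offsets, giving [0, 1, 4, 6, 7], and the function returns 5, the number of unique embedding rows.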
2ab20ea79900fa196523613ea0cbaf75701bf55e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <torch/extension.h> namespace { template <typename scalar_t> __global__ void path_conv_forward_cuda_kernel( scalar_t* __restrict__ output, const int64_t* __restrict__ path_indices, const scalar_t* __restrict__ features, int64_t n_paths, int64_t path_size, int64_t feat_path_size, int64_t hidden_size) { const int row = blockIdx.x * blockDim.x + threadIdx.x; const int col = blockIdx.y; const int index = row * hidden_size + col; scalar_t val = 1. / path_size; int64_t node_idx; if (col < hidden_size && row < n_paths) { for (int64_t j = 0; j < path_size; ++j){ node_idx = path_indices[row * path_size + j]; output[index] += val * features[(node_idx * feat_path_size + j) * hidden_size + col]; } } } template <typename scalar_t> __global__ void path_conv_backward_cuda_kernel( scalar_t* __restrict__ d_input, const int64_t* __restrict__ path_indices, const scalar_t* __restrict__ d_output, int64_t n_paths, int64_t path_size, int64_t feat_path_size, int64_t hidden_size) { const int row = blockIdx.x * blockDim.x + threadIdx.x; const int col = blockIdx.y; const int index = row * hidden_size + col; scalar_t val = 1. / path_size; int64_t node_idx; if (col < hidden_size && row < n_paths) { for (int64_t j = 0; j < path_size; ++j){ node_idx = path_indices[row * path_size + j]; d_input[(node_idx * feat_path_size + j) * hidden_size + col] += val * d_output[index]; } } } } torch::Tensor path_conv_forward_cuda( torch::Tensor path_indices, torch::Tensor features) { // path_indices: n_paths x path_size (value < n_nodes) // features: n_nodes x path_size x hidden_size x (in_path_size) // output: n_paths x hidden_size x (in_path_size) const int64_t n_paths = path_indices.size(0); const int64_t path_size = path_indices.size(1); const int64_t feat_path_size = features.size(1); const int64_t hidden_size = features.size(2); auto output = torch::zeros({n_paths, hidden_size}, features.options()); const int threads = 1024; const dim3 blocks((n_paths + threads - 1) / threads, hidden_size); AT_DISPATCH_FLOATING_TYPES(features.type(), "path_conv_forward_cuda", ([&] { hipLaunchKernelGGL(( path_conv_forward_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, output.data_ptr<scalar_t>(), path_indices.data_ptr<int64_t>(), features.data_ptr<scalar_t>(), n_paths, path_size, feat_path_size, hidden_size); })); return output; } void path_conv_backward_cuda( torch::Tensor d_input, torch::Tensor d_output, torch::Tensor path_indices) { const int64_t n_paths = path_indices.size(0); const int64_t path_size = path_indices.size(1); const int64_t feat_path_size = d_input.size(1); const int64_t hidden_size = d_output.size(1); // auto commonDtype = promoteTypes(d_input.scalar_type(), d_output.scalar_type()); const int threads = 1024; const dim3 blocks((n_paths + threads - 1) / threads, hidden_size); AT_DISPATCH_FLOATING_TYPES(d_output.type(), "path_conv_backward_cuda", ([&] { hipLaunchKernelGGL(( path_conv_backward_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, d_input.data_ptr<scalar_t>(), path_indices.data_ptr<int64_t>(), d_output.data_ptr<scalar_t>(), n_paths, path_size, feat_path_size, hidden_size); })); }
2ab20ea79900fa196523613ea0cbaf75701bf55e.cu
#include <torch/extension.h> namespace { template <typename scalar_t> __global__ void path_conv_forward_cuda_kernel( scalar_t* __restrict__ output, const int64_t* __restrict__ path_indices, const scalar_t* __restrict__ features, int64_t n_paths, int64_t path_size, int64_t feat_path_size, int64_t hidden_size) { const int row = blockIdx.x * blockDim.x + threadIdx.x; const int col = blockIdx.y; const int index = row * hidden_size + col; scalar_t val = 1. / path_size; int64_t node_idx; if (col < hidden_size && row < n_paths) { for (int64_t j = 0; j < path_size; ++j){ node_idx = path_indices[row * path_size + j]; output[index] += val * features[(node_idx * feat_path_size + j) * hidden_size + col]; } } } template <typename scalar_t> __global__ void path_conv_backward_cuda_kernel( scalar_t* __restrict__ d_input, const int64_t* __restrict__ path_indices, const scalar_t* __restrict__ d_output, int64_t n_paths, int64_t path_size, int64_t feat_path_size, int64_t hidden_size) { const int row = blockIdx.x * blockDim.x + threadIdx.x; const int col = blockIdx.y; const int index = row * hidden_size + col; scalar_t val = 1. / path_size; int64_t node_idx; if (col < hidden_size && row < n_paths) { for (int64_t j = 0; j < path_size; ++j){ node_idx = path_indices[row * path_size + j]; d_input[(node_idx * feat_path_size + j) * hidden_size + col] += val * d_output[index]; } } } } torch::Tensor path_conv_forward_cuda( torch::Tensor path_indices, torch::Tensor features) { // path_indices: n_paths x path_size (value < n_nodes) // features: n_nodes x path_size x hidden_size x (in_path_size) // output: n_paths x hidden_size x (in_path_size) const int64_t n_paths = path_indices.size(0); const int64_t path_size = path_indices.size(1); const int64_t feat_path_size = features.size(1); const int64_t hidden_size = features.size(2); auto output = torch::zeros({n_paths, hidden_size}, features.options()); const int threads = 1024; const dim3 blocks((n_paths + threads - 1) / threads, hidden_size); AT_DISPATCH_FLOATING_TYPES(features.type(), "path_conv_forward_cuda", ([&] { path_conv_forward_cuda_kernel<scalar_t><<<blocks, threads>>>( output.data_ptr<scalar_t>(), path_indices.data_ptr<int64_t>(), features.data_ptr<scalar_t>(), n_paths, path_size, feat_path_size, hidden_size); })); return output; } void path_conv_backward_cuda( torch::Tensor d_input, torch::Tensor d_output, torch::Tensor path_indices) { const int64_t n_paths = path_indices.size(0); const int64_t path_size = path_indices.size(1); const int64_t feat_path_size = d_input.size(1); const int64_t hidden_size = d_output.size(1); // auto commonDtype = promoteTypes(d_input.scalar_type(), d_output.scalar_type()); const int threads = 1024; const dim3 blocks((n_paths + threads - 1) / threads, hidden_size); AT_DISPATCH_FLOATING_TYPES(d_output.type(), "path_conv_backward_cuda", ([&] { path_conv_backward_cuda_kernel<scalar_t><<<blocks, threads>>>( d_input.data_ptr<scalar_t>(), path_indices.data_ptr<int64_t>(), d_output.data_ptr<scalar_t>(), n_paths, path_size, feat_path_size, hidden_size); })); }
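These two entry points are typically exposed to Python through a small companion binding file; the sketch below assumes the standard torch extension mechanism and invents the exposed method names:

// Sketch of a companion binding translation unit (assumed, not part of this file).
#include <torch/extension.h>

torch::Tensor path_conv_forward_cuda(torch::Tensor path_indices, torch::Tensor features);
void path_conv_backward_cuda(torch::Tensor d_input, torch::Tensor d_output, torch::Tensor path_indices);

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("forward", &path_conv_forward_cuda, "Path convolution forward (CUDA)");
  m.def("backward", &path_conv_backward_cuda, "Path convolution backward (CUDA)");
}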
a8b00a78e2b7e51037a28a6353cd5e20765896ce.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gmock/gmock.h"
#include "lattice/site_indexing/SiteIndex.h"
#include "lattice/site_indexing/SiteNeighbourTableManager.h"
#include "cudacommon/DeviceCommunicator.h"

using namespace testing;
using namespace culgt;

class ASiteNeighbourTableManager: public Test
{
public:
    typedef SiteIndex<4,NO_SPLIT> MySite;
    LatticeDimension<4> dim;

    ASiteNeighbourTableManager() : dim(4,4,4,4)
    {
    }
};

TEST_F( ASiteNeighbourTableManager, IsNotAvailableIfNotCreated )
{
    bool result = SiteNeighbourTableManager<MySite>::isAvailableOnHost( dim );
    ASSERT_FALSE( result );
}

TEST_F( ASiteNeighbourTableManager, IsAvailableIfCreated )
{
    SiteNeighbourTableManager<MySite>::generateOnHost( dim );
    bool result = SiteNeighbourTableManager<MySite>::isAvailableOnHost( dim );
    ASSERT_TRUE( result );
}

TEST_F( ASiteNeighbourTableManager, NeighbourIndexIsCorrect )
{
    MySite site( dim, SiteNeighbourTableManager<MySite>::getHostPointer( dim ) );
    site.setIndex( 0 );
    site.setNeighbour( 3, true );
    ASSERT_EQ( 1, site.getIndex() );
}

__global__ void kernelTestNeighbourTable( LatticeDimension<4> dim, lat_index_t* nn, lat_index_t* var )
{
    typedef SiteIndex<4,NO_SPLIT> MySite;
    MySite site( dim, nn );
    site.setIndex( 0 );
    site.setNeighbour( 3, true );
    var[0] = site.getIndex();
}

TEST( ASiteNeighbourTableManagerOnDevice, NeighbourIndexIsCorrect )
{
    typedef SiteIndex<4,NO_SPLIT> MySite;
    LatticeDimension<4> dim(8,8,8,8);
    lat_index_t* deviceVar;
    hipMalloc( &deviceVar, sizeof(lat_index_t) );
    hipLaunchKernelGGL(( kernelTestNeighbourTable), dim3(1),dim3(1), 0, 0, dim, SiteNeighbourTableManager<MySite>::getDevicePointer( dim ), deviceVar );
    CUDA_LAST_ERROR( "test kernel" );
    ASSERT_EQ( 1, DeviceCommunicator<lat_index_t>::getValue( deviceVar, 0 ) );
}
a8b00a78e2b7e51037a28a6353cd5e20765896ce.cu
#include "gmock/gmock.h"
#include "lattice/site_indexing/SiteIndex.h"
#include "lattice/site_indexing/SiteNeighbourTableManager.h"
#include "cudacommon/DeviceCommunicator.h"

using namespace testing;
using namespace culgt;

class ASiteNeighbourTableManager: public Test
{
public:
    typedef SiteIndex<4,NO_SPLIT> MySite;
    LatticeDimension<4> dim;

    ASiteNeighbourTableManager() : dim(4,4,4,4)
    {
    }
};

TEST_F( ASiteNeighbourTableManager, IsNotAvailableIfNotCreated )
{
    bool result = SiteNeighbourTableManager<MySite>::isAvailableOnHost( dim );
    ASSERT_FALSE( result );
}

TEST_F( ASiteNeighbourTableManager, IsAvailableIfCreated )
{
    SiteNeighbourTableManager<MySite>::generateOnHost( dim );
    bool result = SiteNeighbourTableManager<MySite>::isAvailableOnHost( dim );
    ASSERT_TRUE( result );
}

TEST_F( ASiteNeighbourTableManager, NeighbourIndexIsCorrect )
{
    MySite site( dim, SiteNeighbourTableManager<MySite>::getHostPointer( dim ) );
    site.setIndex( 0 );
    site.setNeighbour( 3, true );
    ASSERT_EQ( 1, site.getIndex() );
}

__global__ void kernelTestNeighbourTable( LatticeDimension<4> dim, lat_index_t* nn, lat_index_t* var )
{
    typedef SiteIndex<4,NO_SPLIT> MySite;
    MySite site( dim, nn );
    site.setIndex( 0 );
    site.setNeighbour( 3, true );
    var[0] = site.getIndex();
}

TEST( ASiteNeighbourTableManagerOnDevice, NeighbourIndexIsCorrect )
{
    typedef SiteIndex<4,NO_SPLIT> MySite;
    LatticeDimension<4> dim(8,8,8,8);
    lat_index_t* deviceVar;
    cudaMalloc( &deviceVar, sizeof(lat_index_t) );
    kernelTestNeighbourTable<<<1,1>>>( dim, SiteNeighbourTableManager<MySite>::getDevicePointer( dim ), deviceVar );
    CUDA_LAST_ERROR( "test kernel" );
    ASSERT_EQ( 1, DeviceCommunicator<lat_index_t>::getValue( deviceVar, 0 ) );
}
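// --- Illustrative sketch (editor's addition, not part of the file pair above) ---
// The device-side test reads one value back through DeviceCommunicator::getValue.
// Without that helper, the same round trip is commonly a plain cudaMemcpy, as
// sketched here; read_back_first is a hypothetical name used only for illustration.
lat_index_t read_back_first(const lat_index_t* deviceVar) {
    lat_index_t host_value = 0;
    cudaMemcpy(&host_value, deviceVar, sizeof(lat_index_t), cudaMemcpyDeviceToHost);
    return host_value;
}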
d78b601acf435fca79110d79d92d999395c0ae42.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <math.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include "cuda_vector_routines.h" #include "isspa_class.h" #include "isspa_force_cuda.h" #include "constants.h" #include "hip/hip_runtime_api.h" using namespace std; // constants __constant__ int nTypes; __constant__ int nMC; __constant__ int nRs; __constant__ int nGRs; __constant__ int nERs; __constant__ int nAtoms; __constant__ int nPairs; __constant__ float2 box; __constant__ float2 forceRparams; __constant__ float2 gRparams; __constant__ float2 eRparams; // device functions // CUDA Kernels // atomic multiply __device__ float atomicMul(float* address, float val) { unsigned int* address_as_u = (unsigned int*)address; unsigned int old = *address_as_u, assumed; do { assumed = old; old = atomicCAS(address_as_u, assumed, __float_as_uint(val * __uint_as_float(assumed))); } while (assumed != old); return __uint_as_float(old); } // warp reduce a float using multiplication __inline__ __device__ float warpReduceMul(float val) { for (int offset = warpSize/2; offset > 0; offset /= 2) val *= __shfl_down(val, offset); return val; } // warp reduce a float4 __inline__ __device__ float4 warpReduceSumQuad(float4 val) { for (int offset = warpSize / 2; offset > 0; offset /= 2) { val.x += __shfl_down(val.x, offset); val.y += __shfl_down(val.y, offset); val.z += __shfl_down(val.z, offset); val.w += __shfl_down(val.w, offset); } return val; } // warp reduce a float4 but only the first three values __inline__ __device__ float4 warpReduceSumTriple(float4 val) { for (int offset = warpSize / 2; offset > 0; offset /= 2) { val.x += __shfl_down(val.x, offset); val.y += __shfl_down(val.y, offset); val.z += __shfl_down(val.z, offset); } return val; } // kernel to generate MC points around each atom __global__ void isspa_MC_points_kernel(float4 *xyz, float4 *mcpos, hiprandState_t *state, const float* __restrict__ rmax, int *isspaTypes) { unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; int atom; int it; float r2; float rmax_l; float4 mcr; float4 mcpos_l; hiprandState_t threadState; atom = int(double(index)/double(nMC)); if (atom < nAtoms) { // load atom paramters it = __ldg(isspaTypes+atom); rmax_l = rmax[it]; mcpos_l = __ldg(xyz+atom); // initialize the random state threadState = state[index]; // generate point in constant density sphere do { mcr.x = fmaf(2.0f,hiprand_uniform(&threadState),-1.0f); mcr.y = fmaf(2.0f,hiprand_uniform(&threadState),-1.0f); mcr.z = fmaf(2.0f,hiprand_uniform(&threadState),-1.0f); r2 = mcr.x*mcr.x + mcr.y*mcr.y + mcr.z*mcr.z; } while (r2 >= 0.99f); // expand sphere and translate by atom position mcr *= rmax_l; mcpos_l += mcr; // initialize density at MC point to 1 mcpos_l.w = 1.0f; // save MC point and random state back to global memory mcpos[index] = mcpos_l; state[index] = threadState; } } // kernel to compute density and mean field at each MC point __global__ void isspa_field_kernel(float4 *xyz, const float* __restrict__ rmax, int *isspaTypes, const float* __restrict__ gTable, const float* __restrict__ eTable, float4 *enow, float4 *e0now, float4 *mcpos, int nThreads) { unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; int atom; int atom2; int MC; int MCind; int bin; int it; int jt; float rmax_l; float dist2, dist; float fracDist; float g1, g2; float e1, e2; float etab; float2 gRparams_l = gRparams; float2 eRparams_l = eRparams; float4 atom2_pos; float4 r; float4 
mcpos_l; float4 enow_l; float4 e0now_l; // Determine which atom the MC point is being generated on atom = int(double(index)/double(nThreads*nMC)); MC = int(double(index)/double(nThreads)); MCind = int(MC - atom*nMC); atom2 = int(index - atom*nMC*nThreads - MCind*nThreads); // zero the local variables that will be reduced if (atom < nAtoms) { if (MCind < nMC) { if (atom2 < nAtoms) { enow_l.x = enow_l.y = enow_l.z = enow_l.w = 0.0f; e0now_l.x = e0now_l.y = e0now_l.z = e0now_l.w = 0.0f; // Get atom positions mcpos_l = __ldg(mcpos+MC); it = __ldg(isspaTypes+atom); rmax_l = rmax[it]; // Get atom positions atom2_pos = __ldg(xyz+atom2); // Get constants for atom jt = __ldg(isspaTypes+atom2); r = min_image(mcpos_l - atom2_pos,box.x,box.y); dist2 = r.x*r.x + r.y*r.y + r.z*r.z; dist = sqrtf(dist2); if (dist <= rmax_l) { e0now_l.w = 1.0f; // determine density bin of distance bin = int(__fdividef(dist-gRparams_l.x,gRparams_l.y)); // make sure bin is in limits of density table if (bin < 0) { mcpos_l.w = 0.0f; } else if (bin < nGRs-1) { // Push Density to MC point //fracDist = (dist - (gRparams_l.x+bin*gRparams_l.y)) / gRparams_l.y; fracDist = __fdividef((dist - (gRparams_l.x+bin*gRparams_l.y)),gRparams_l.y); g1 = gTable[jt*nGRs+bin]; g2 = gTable[jt*nGRs+bin+1]; mcpos_l.w = fmaf(g2,fracDist,g1*(1.0f-fracDist)); //mcpos_l.w = gTable[jt*nGRs+bin]; // Push mean field to MC point //fracDist = (dist - (eRparams_l.x+bin*eRparams_l.y)) / eRparams_l.y; fracDist = __fdividef((dist - (eRparams_l.x+bin*eRparams_l.y)),eRparams_l.y); e1 = eTable[jt*nERs+bin]; e2 = eTable[jt*nERs+bin+1]; etab = fmaf(e2,fracDist,e1*(1.0f-fracDist)); //etab = eTable[jt*nGRs+bin]; enow_l += r*__fdividef(etab,dist); } } else { e0now_l = -r*__fdividef(e0*atom2_pos.w,dist2*dist); e0now_l.w = 0.0f; mcpos_l.w = 1.0f; } enow_l -= r*__fdividef(e0*atom2_pos.w,dist2*dist); } else { enow_l.x = enow_l.y = enow_l.z = 0.0f; e0now_l.x = e0now_l.y = e0now_l.z = e0now_l.w = 0.0f; mcpos_l.w = 1.0f; } // Warp reduce the fields mcpos_l.w = warpReduceMul(mcpos_l.w); enow_l = warpReduceSumTriple(enow_l); e0now_l = warpReduceSumQuad(e0now_l); // Add the fields to the global variable if ((threadIdx.x & (warpSize - 1)) == 0) { atomicMul(&(mcpos[MC].w), mcpos_l.w); atomicAdd(&(enow[MC].x), enow_l.x); atomicAdd(&(enow[MC].y), enow_l.y); atomicAdd(&(enow[MC].z), enow_l.z); atomicAdd(&(e0now[MC].x), e0now_l.x); atomicAdd(&(e0now[MC].y), e0now_l.y); atomicAdd(&(e0now[MC].z), e0now_l.z); atomicAdd(&(e0now[MC].w), e0now_l.w); } } } } __global__ void isspa_force_kernel(float4 *xyz, const float* __restrict__ vtot, const float* __restrict__ rmax, int *isspaTypes, const float* __restrict__ forceTable, float4 *f, float4 *enow, float4 *e0now, float4 *mcpos, float nThreads, float4 *isspaf) { unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; int bin; int jt; int MC; int atom; float igo; float fs; float r0; float rmax_l; float vtot_l; float dist2, dist; float pdotr; float cothE; float c1,c2,c3; float dp1,dp2,dp3; float Rz; float f1, f2; float fracDist; float4 xyz_l; float4 r; float4 fi; //float4 fj; float4 mcpos_l; float4 enow_l; float4 e0now_l; // Determine the atom for which the force is being summed on atom = int(double(index)/double(nThreads)); MC = int(index-atom*nThreads); // Zero out the forces fi.x = fi.y = fi.z = 0.0f; //fj.x = fj.y = fj.z = 0.0f; if (atom < nAtoms) { if (MC < nAtoms*nMC) { // Load in position, atom type, and rmax of atom xyz_l = __ldg(xyz+atom); jt = __ldg(isspaTypes + atom); rmax_l = rmax[jt]; vtot_l = vtot[jt]; // Load in field data for 
the MC point mcpos_l = __ldg(mcpos+MC); enow_l = __ldg(enow+MC); e0now_l = __ldg(e0now+MC); // Finish calculating density igo = __fdividef(vtot_l,e0now_l.w); mcpos_l.w *= igo; // Convert enow into polarzation r0 = norm3df(enow_l.x, enow_l.y, enow_l.z); enow_l.x = __fdividef(enow_l.x,r0); enow_l.y = __fdividef(enow_l.y,r0); enow_l.z = __fdividef(enow_l.z,r0); enow_l.w = r0; e0now_l.x = __fdividef(e0now_l.x,3.0f); e0now_l.y = __fdividef(e0now_l.y,3.0f); e0now_l.z = __fdividef(e0now_l.z,3.0f); e0now_l.w = igo; // Calculate the distance between the MC point and atom1 r = min_image(mcpos_l - xyz_l,box.x,box.y); dist2 = r.x*r.x + r.y*r.y + r.z*r.z; dist = sqrtf(dist2); // Coulombic Force cothE=__fdividef(1.0f,tanhf(enow_l.w)); c1=cothE-__fdividef(1.0f,enow_l.w); c2=1.0f-2.0f*__fdividef(c1,enow_l.w); c3=cothE-3.0f*__fdividef(c2,enow_l.w); Rz=__fdividef(enow_l.x*r.x+enow_l.y*r.y+enow_l.z*r.z,dist); dp1=3.0f*Rz; dp2=7.5f*Rz*Rz-1.5f; dp3=(17.50f*Rz*Rz-7.50f)*Rz; // Calculate dipole term fs = __fdividef(-xyz_l.w*p0*c1*mcpos_l.w,dist2*dist); fi += fs*(r*__fdividef(dp1,dist)-enow_l); //fj += fs*(r*__fdividef(dp1,dist)-enow_l); // Calculate quadrapole term fs = __fdividef(-xyz_l.w*q0*(1.5f*c2-0.5f)*mcpos_l.w,dist2*dist2); fi += fs*(r*__fdividef(dp2,dist)-dp1*enow_l); //fj += fs*(r*__fdividef(dp2,dist)-dp1*enow_l); // Calculate octapole term fs = __fdividef(-xyz_l.w*o0*(2.5f*c3-1.5f*c1)*mcpos_l.w,dist2*dist2*dist); fi += fs*(r*__fdividef(dp3,dist)-dp2*enow_l); //fj += fs*(r*__fdividef(dp3,dist)-dp2*enow_l); // Lennard-Jones Force if (dist <= rmax_l) { bin = int ( __fdividef(dist-forceRparams.x,forceRparams.y) + 0.5f); if (bin >= (nRs)) { fs = 0.0f; } else { //Lennard-Jones Force fracDist = __fdividef((dist-(forceRparams.x+bin*forceRparams.y)),forceRparams.y); f1 = forceTable[jt*nRs+bin]; f2 = forceTable[jt*nRs+bin+1]; fs = (f1*(1.0-fracDist)+f2*fracDist)*mcpos_l.w; fs = fmaf(f2,fracDist,f1*(1.0f-fracDist))*mcpos_l.w; //fs = forceTable[jt*nRs+bin]*mcpos_l.w; } fi += r*__fdividef(-fs,dist); //fj += r*__fdividef(-fs,dist); } else { // Constant Density Dielectric fs=__fdividef(-xyz_l.w*p0,dist2*dist); pdotr=__fdividef(3.0f*(e0now_l.x*r.x+e0now_l.y*r.y+e0now_l.z*r.z),dist2); fi += fs*(pdotr*r-e0now_l)*e0now_l.w; //fj += fs*(pdotr*r-e0now_l)*e0now_l.w; } } } // Warp reduce the forces fi = warpReduceSumTriple(fi); //fj = warpReduceSumTriple(fj); // Add the force to the global force if ((threadIdx.x & (warpSize - 1)) == 0) { atomicAdd(&(f[atom].x), fi.x); atomicAdd(&(f[atom].y), fi.y); atomicAdd(&(f[atom].z), fi.z); //atomicAdd(&(isspaf[atom].x), fj.x); //atomicAdd(&(isspaf[atom].y), fj.y); //atomicAdd(&(isspaf[atom].z), fj.z); } } /* C wrappers for kernels */ float isspa_force_cuda(float4 *xyz_d, float4 *f_d, float4 *isspaf_d, isspa& isspas, int nAtoms_h) { //float isspa_force_cuda(float4 *xyz_d, float4 *f_d, isspa& isspas, int nAtoms_h) { float milliseconds; // timing hipEventRecord(isspas.isspaStart); hipProfilerStart(); // zero IS-SPA arrays on GPU hipMemset(isspas.enow_d, 0.0f, nAtoms_h*isspas.nMC*sizeof(float4)); hipMemset(isspas.e0now_d, 0.0f, nAtoms_h*isspas.nMC*sizeof(float4)); hipMemset(isspaf_d, 0.0f, nAtoms_h*sizeof(float4)); // compute position of each MC point hipLaunchKernelGGL(( isspa_MC_points_kernel), dim3(isspas.mcGridSize),dim3(isspas.mcBlockSize) , 0, 0, xyz_d, isspas.mcpos_d, isspas.randStates_d, isspas.rmax_d, isspas.isspaTypes_d); // compute densities and mean electric field value for each MC point hipLaunchKernelGGL(( isspa_field_kernel), dim3(isspas.fieldGridSize), 
dim3(isspas.fieldBlockSize), 0, 0, xyz_d, isspas.rmax_d, isspas.isspaTypes_d, isspas.isspaGTable_d, isspas.isspaETable_d, isspas.enow_d, isspas.e0now_d, isspas.mcpos_d, isspas.fieldThreads); // compute forces for each atom hipLaunchKernelGGL(( isspa_force_kernel), dim3(isspas.forceGridSize), dim3(isspas.forceBlockSize), 0, 0, xyz_d,isspas.vtot_d,isspas.rmax_d,isspas.isspaTypes_d,isspas.isspaForceTable_d,f_d,isspas.enow_d,isspas.e0now_d,isspas.mcpos_d,isspas.forceThreads,isspaf_d); hipDeviceSynchronize(); hipProfilerStop(); // finish timing hipEventRecord(isspas.isspaStop); hipEventSynchronize(isspas.isspaStop); hipEventElapsedTime(&milliseconds, isspas.isspaStart, isspas.isspaStop); return milliseconds; } void isspa_grid_block(int nAtoms_h, int nPairs_h, float lbox_h, isspa& isspas) { float2 box_h; int maxThreadsPerBlock = 1024; int temp; // determine gridSize and blockSize for MC kernel isspas.mcGridSize = int(ceil(isspas.nMC*nAtoms_h/(float) maxThreadsPerBlock)); isspas.mcBlockSize = maxThreadsPerBlock; printf("Number of IS-SPA mc kernel blocks: %d \n", isspas.mcGridSize); printf("Number of IS-SPA mc kernel threads per block: %d \n", isspas.mcBlockSize); // determine gridSize and blockSize for field kernel temp = int(ceil((nAtoms_h) / (float) 32.0)); isspas.fieldThreads = temp*32; isspas.fieldGridSize = int(ceil(isspas.fieldThreads*nAtoms_h*isspas.nMC / (float) maxThreadsPerBlock)); isspas.fieldBlockSize = maxThreadsPerBlock; printf("Number of IS-SPA field kernel blocks: %d \n", isspas.fieldGridSize); printf("Number of IS-SPA field kernel threads per block: %d \n", isspas.fieldBlockSize); // determine gridSize and blockSize for force kernel temp = int(ceil((nAtoms_h*isspas.nMC) / (float) 32.0)); isspas.forceThreads = temp*32; isspas.forceGridSize = int(ceil(isspas.forceThreads*nAtoms_h / (float) maxThreadsPerBlock)); isspas.forceBlockSize = maxThreadsPerBlock; printf("Number of IS-SPA force kernel blocks: %d \n", isspas.forceGridSize); printf("Number of IS-SPA force kernel threads per block: %d \n", isspas.forceBlockSize); printf("Number of IS-SPA force kernel MC points: %d %d \n", isspas.forceThreads,nAtoms_h*isspas.nMC); // fill box with box and half box length box_h.x = lbox_h; box_h.y = lbox_h/2.0f; // set constant memory hipMemcpyToSymbol(nMC, &isspas.nMC, sizeof(int)); hipMemcpyToSymbol(nTypes, &isspas.nTypes, sizeof(int)); hipMemcpyToSymbol(nRs, &isspas.nRs, sizeof(int)); hipMemcpyToSymbol(nGRs, &isspas.nGRs, sizeof(int)); hipMemcpyToSymbol(nERs, &isspas.nERs, sizeof(int)); hipMemcpyToSymbol(nAtoms, &nAtoms_h, sizeof(int)); hipMemcpyToSymbol(nPairs, &nPairs_h, sizeof(int)); hipMemcpyToSymbol(box, &box_h, sizeof(float2)); hipMemcpyToSymbol(forceRparams, &isspas.forceRparams, sizeof(float2)); hipMemcpyToSymbol(gRparams, &isspas.gRparams, sizeof(float2)); hipMemcpyToSymbol(eRparams, &isspas.eRparams, sizeof(float2)); }
d78b601acf435fca79110d79d92d999395c0ae42.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <math.h> #include <curand.h> #include <curand_kernel.h> #include "cuda_vector_routines.h" #include "isspa_class.h" #include "isspa_force_cuda.h" #include "constants.h" #include "cuda_profiler_api.h" using namespace std; // constants __constant__ int nTypes; __constant__ int nMC; __constant__ int nRs; __constant__ int nGRs; __constant__ int nERs; __constant__ int nAtoms; __constant__ int nPairs; __constant__ float2 box; __constant__ float2 forceRparams; __constant__ float2 gRparams; __constant__ float2 eRparams; // device functions // CUDA Kernels // atomic multiply __device__ float atomicMul(float* address, float val) { unsigned int* address_as_u = (unsigned int*)address; unsigned int old = *address_as_u, assumed; do { assumed = old; old = atomicCAS(address_as_u, assumed, __float_as_uint(val * __uint_as_float(assumed))); } while (assumed != old); return __uint_as_float(old); } // warp reduce a float using multiplication __inline__ __device__ float warpReduceMul(float val) { for (int offset = warpSize/2; offset > 0; offset /= 2) val *= __shfl_down(val, offset); return val; } // warp reduce a float4 __inline__ __device__ float4 warpReduceSumQuad(float4 val) { for (int offset = warpSize / 2; offset > 0; offset /= 2) { val.x += __shfl_down(val.x, offset); val.y += __shfl_down(val.y, offset); val.z += __shfl_down(val.z, offset); val.w += __shfl_down(val.w, offset); } return val; } // warp reduce a float4 but only the first three values __inline__ __device__ float4 warpReduceSumTriple(float4 val) { for (int offset = warpSize / 2; offset > 0; offset /= 2) { val.x += __shfl_down(val.x, offset); val.y += __shfl_down(val.y, offset); val.z += __shfl_down(val.z, offset); } return val; } // kernel to generate MC points around each atom __global__ void isspa_MC_points_kernel(float4 *xyz, float4 *mcpos, curandState *state, const float* __restrict__ rmax, int *isspaTypes) { unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; int atom; int it; float r2; float rmax_l; float4 mcr; float4 mcpos_l; curandState_t threadState; atom = int(double(index)/double(nMC)); if (atom < nAtoms) { // load atom paramters it = __ldg(isspaTypes+atom); rmax_l = rmax[it]; mcpos_l = __ldg(xyz+atom); // initialize the random state threadState = state[index]; // generate point in constant density sphere do { mcr.x = fmaf(2.0f,curand_uniform(&threadState),-1.0f); mcr.y = fmaf(2.0f,curand_uniform(&threadState),-1.0f); mcr.z = fmaf(2.0f,curand_uniform(&threadState),-1.0f); r2 = mcr.x*mcr.x + mcr.y*mcr.y + mcr.z*mcr.z; } while (r2 >= 0.99f); // expand sphere and translate by atom position mcr *= rmax_l; mcpos_l += mcr; // initialize density at MC point to 1 mcpos_l.w = 1.0f; // save MC point and random state back to global memory mcpos[index] = mcpos_l; state[index] = threadState; } } // kernel to compute density and mean field at each MC point __global__ void isspa_field_kernel(float4 *xyz, const float* __restrict__ rmax, int *isspaTypes, const float* __restrict__ gTable, const float* __restrict__ eTable, float4 *enow, float4 *e0now, float4 *mcpos, int nThreads) { unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; int atom; int atom2; int MC; int MCind; int bin; int it; int jt; float rmax_l; float dist2, dist; float fracDist; float g1, g2; float e1, e2; float etab; float2 gRparams_l = gRparams; float2 eRparams_l = eRparams; float4 atom2_pos; float4 r; float4 mcpos_l; float4 enow_l; float4 e0now_l; // Determine which atom the MC point is being generated on 
atom = int(double(index)/double(nThreads*nMC)); MC = int(double(index)/double(nThreads)); MCind = int(MC - atom*nMC); atom2 = int(index - atom*nMC*nThreads - MCind*nThreads); // zero the local variables that will be reduced if (atom < nAtoms) { if (MCind < nMC) { if (atom2 < nAtoms) { enow_l.x = enow_l.y = enow_l.z = enow_l.w = 0.0f; e0now_l.x = e0now_l.y = e0now_l.z = e0now_l.w = 0.0f; // Get atom positions mcpos_l = __ldg(mcpos+MC); it = __ldg(isspaTypes+atom); rmax_l = rmax[it]; // Get atom positions atom2_pos = __ldg(xyz+atom2); // Get constants for atom jt = __ldg(isspaTypes+atom2); r = min_image(mcpos_l - atom2_pos,box.x,box.y); dist2 = r.x*r.x + r.y*r.y + r.z*r.z; dist = sqrtf(dist2); if (dist <= rmax_l) { e0now_l.w = 1.0f; // determine density bin of distance bin = int(__fdividef(dist-gRparams_l.x,gRparams_l.y)); // make sure bin is in limits of density table if (bin < 0) { mcpos_l.w = 0.0f; } else if (bin < nGRs-1) { // Push Density to MC point //fracDist = (dist - (gRparams_l.x+bin*gRparams_l.y)) / gRparams_l.y; fracDist = __fdividef((dist - (gRparams_l.x+bin*gRparams_l.y)),gRparams_l.y); g1 = gTable[jt*nGRs+bin]; g2 = gTable[jt*nGRs+bin+1]; mcpos_l.w = fmaf(g2,fracDist,g1*(1.0f-fracDist)); //mcpos_l.w = gTable[jt*nGRs+bin]; // Push mean field to MC point //fracDist = (dist - (eRparams_l.x+bin*eRparams_l.y)) / eRparams_l.y; fracDist = __fdividef((dist - (eRparams_l.x+bin*eRparams_l.y)),eRparams_l.y); e1 = eTable[jt*nERs+bin]; e2 = eTable[jt*nERs+bin+1]; etab = fmaf(e2,fracDist,e1*(1.0f-fracDist)); //etab = eTable[jt*nGRs+bin]; enow_l += r*__fdividef(etab,dist); } } else { e0now_l = -r*__fdividef(e0*atom2_pos.w,dist2*dist); e0now_l.w = 0.0f; mcpos_l.w = 1.0f; } enow_l -= r*__fdividef(e0*atom2_pos.w,dist2*dist); } else { enow_l.x = enow_l.y = enow_l.z = 0.0f; e0now_l.x = e0now_l.y = e0now_l.z = e0now_l.w = 0.0f; mcpos_l.w = 1.0f; } // Warp reduce the fields mcpos_l.w = warpReduceMul(mcpos_l.w); enow_l = warpReduceSumTriple(enow_l); e0now_l = warpReduceSumQuad(e0now_l); // Add the fields to the global variable if ((threadIdx.x & (warpSize - 1)) == 0) { atomicMul(&(mcpos[MC].w), mcpos_l.w); atomicAdd(&(enow[MC].x), enow_l.x); atomicAdd(&(enow[MC].y), enow_l.y); atomicAdd(&(enow[MC].z), enow_l.z); atomicAdd(&(e0now[MC].x), e0now_l.x); atomicAdd(&(e0now[MC].y), e0now_l.y); atomicAdd(&(e0now[MC].z), e0now_l.z); atomicAdd(&(e0now[MC].w), e0now_l.w); } } } } __global__ void isspa_force_kernel(float4 *xyz, const float* __restrict__ vtot, const float* __restrict__ rmax, int *isspaTypes, const float* __restrict__ forceTable, float4 *f, float4 *enow, float4 *e0now, float4 *mcpos, float nThreads, float4 *isspaf) { unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; int bin; int jt; int MC; int atom; float igo; float fs; float r0; float rmax_l; float vtot_l; float dist2, dist; float pdotr; float cothE; float c1,c2,c3; float dp1,dp2,dp3; float Rz; float f1, f2; float fracDist; float4 xyz_l; float4 r; float4 fi; //float4 fj; float4 mcpos_l; float4 enow_l; float4 e0now_l; // Determine the atom for which the force is being summed on atom = int(double(index)/double(nThreads)); MC = int(index-atom*nThreads); // Zero out the forces fi.x = fi.y = fi.z = 0.0f; //fj.x = fj.y = fj.z = 0.0f; if (atom < nAtoms) { if (MC < nAtoms*nMC) { // Load in position, atom type, and rmax of atom xyz_l = __ldg(xyz+atom); jt = __ldg(isspaTypes + atom); rmax_l = rmax[jt]; vtot_l = vtot[jt]; // Load in field data for the MC point mcpos_l = __ldg(mcpos+MC); enow_l = __ldg(enow+MC); e0now_l = __ldg(e0now+MC); // Finish 
calculating density igo = __fdividef(vtot_l,e0now_l.w); mcpos_l.w *= igo; // Convert enow into polarzation r0 = norm3df(enow_l.x, enow_l.y, enow_l.z); enow_l.x = __fdividef(enow_l.x,r0); enow_l.y = __fdividef(enow_l.y,r0); enow_l.z = __fdividef(enow_l.z,r0); enow_l.w = r0; e0now_l.x = __fdividef(e0now_l.x,3.0f); e0now_l.y = __fdividef(e0now_l.y,3.0f); e0now_l.z = __fdividef(e0now_l.z,3.0f); e0now_l.w = igo; // Calculate the distance between the MC point and atom1 r = min_image(mcpos_l - xyz_l,box.x,box.y); dist2 = r.x*r.x + r.y*r.y + r.z*r.z; dist = sqrtf(dist2); // Coulombic Force cothE=__fdividef(1.0f,tanhf(enow_l.w)); c1=cothE-__fdividef(1.0f,enow_l.w); c2=1.0f-2.0f*__fdividef(c1,enow_l.w); c3=cothE-3.0f*__fdividef(c2,enow_l.w); Rz=__fdividef(enow_l.x*r.x+enow_l.y*r.y+enow_l.z*r.z,dist); dp1=3.0f*Rz; dp2=7.5f*Rz*Rz-1.5f; dp3=(17.50f*Rz*Rz-7.50f)*Rz; // Calculate dipole term fs = __fdividef(-xyz_l.w*p0*c1*mcpos_l.w,dist2*dist); fi += fs*(r*__fdividef(dp1,dist)-enow_l); //fj += fs*(r*__fdividef(dp1,dist)-enow_l); // Calculate quadrapole term fs = __fdividef(-xyz_l.w*q0*(1.5f*c2-0.5f)*mcpos_l.w,dist2*dist2); fi += fs*(r*__fdividef(dp2,dist)-dp1*enow_l); //fj += fs*(r*__fdividef(dp2,dist)-dp1*enow_l); // Calculate octapole term fs = __fdividef(-xyz_l.w*o0*(2.5f*c3-1.5f*c1)*mcpos_l.w,dist2*dist2*dist); fi += fs*(r*__fdividef(dp3,dist)-dp2*enow_l); //fj += fs*(r*__fdividef(dp3,dist)-dp2*enow_l); // Lennard-Jones Force if (dist <= rmax_l) { bin = int ( __fdividef(dist-forceRparams.x,forceRparams.y) + 0.5f); if (bin >= (nRs)) { fs = 0.0f; } else { //Lennard-Jones Force fracDist = __fdividef((dist-(forceRparams.x+bin*forceRparams.y)),forceRparams.y); f1 = forceTable[jt*nRs+bin]; f2 = forceTable[jt*nRs+bin+1]; fs = (f1*(1.0-fracDist)+f2*fracDist)*mcpos_l.w; fs = fmaf(f2,fracDist,f1*(1.0f-fracDist))*mcpos_l.w; //fs = forceTable[jt*nRs+bin]*mcpos_l.w; } fi += r*__fdividef(-fs,dist); //fj += r*__fdividef(-fs,dist); } else { // Constant Density Dielectric fs=__fdividef(-xyz_l.w*p0,dist2*dist); pdotr=__fdividef(3.0f*(e0now_l.x*r.x+e0now_l.y*r.y+e0now_l.z*r.z),dist2); fi += fs*(pdotr*r-e0now_l)*e0now_l.w; //fj += fs*(pdotr*r-e0now_l)*e0now_l.w; } } } // Warp reduce the forces fi = warpReduceSumTriple(fi); //fj = warpReduceSumTriple(fj); // Add the force to the global force if ((threadIdx.x & (warpSize - 1)) == 0) { atomicAdd(&(f[atom].x), fi.x); atomicAdd(&(f[atom].y), fi.y); atomicAdd(&(f[atom].z), fi.z); //atomicAdd(&(isspaf[atom].x), fj.x); //atomicAdd(&(isspaf[atom].y), fj.y); //atomicAdd(&(isspaf[atom].z), fj.z); } } /* C wrappers for kernels */ float isspa_force_cuda(float4 *xyz_d, float4 *f_d, float4 *isspaf_d, isspa& isspas, int nAtoms_h) { //float isspa_force_cuda(float4 *xyz_d, float4 *f_d, isspa& isspas, int nAtoms_h) { float milliseconds; // timing cudaEventRecord(isspas.isspaStart); cudaProfilerStart(); // zero IS-SPA arrays on GPU cudaMemset(isspas.enow_d, 0.0f, nAtoms_h*isspas.nMC*sizeof(float4)); cudaMemset(isspas.e0now_d, 0.0f, nAtoms_h*isspas.nMC*sizeof(float4)); cudaMemset(isspaf_d, 0.0f, nAtoms_h*sizeof(float4)); // compute position of each MC point isspa_MC_points_kernel<<<isspas.mcGridSize,isspas.mcBlockSize >>>(xyz_d, isspas.mcpos_d, isspas.randStates_d, isspas.rmax_d, isspas.isspaTypes_d); // compute densities and mean electric field value for each MC point isspa_field_kernel<<<isspas.fieldGridSize, isspas.fieldBlockSize>>>(xyz_d, isspas.rmax_d, isspas.isspaTypes_d, isspas.isspaGTable_d, isspas.isspaETable_d, isspas.enow_d, isspas.e0now_d, isspas.mcpos_d, isspas.fieldThreads); 
// compute forces for each atom isspa_force_kernel<<<isspas.forceGridSize, isspas.forceBlockSize>>>(xyz_d,isspas.vtot_d,isspas.rmax_d,isspas.isspaTypes_d,isspas.isspaForceTable_d,f_d,isspas.enow_d,isspas.e0now_d,isspas.mcpos_d,isspas.forceThreads,isspaf_d); cudaDeviceSynchronize(); cudaProfilerStop(); // finish timing cudaEventRecord(isspas.isspaStop); cudaEventSynchronize(isspas.isspaStop); cudaEventElapsedTime(&milliseconds, isspas.isspaStart, isspas.isspaStop); return milliseconds; } void isspa_grid_block(int nAtoms_h, int nPairs_h, float lbox_h, isspa& isspas) { float2 box_h; int maxThreadsPerBlock = 1024; int temp; // determine gridSize and blockSize for MC kernel isspas.mcGridSize = int(ceil(isspas.nMC*nAtoms_h/(float) maxThreadsPerBlock)); isspas.mcBlockSize = maxThreadsPerBlock; printf("Number of IS-SPA mc kernel blocks: %d \n", isspas.mcGridSize); printf("Number of IS-SPA mc kernel threads per block: %d \n", isspas.mcBlockSize); // determine gridSize and blockSize for field kernel temp = int(ceil((nAtoms_h) / (float) 32.0)); isspas.fieldThreads = temp*32; isspas.fieldGridSize = int(ceil(isspas.fieldThreads*nAtoms_h*isspas.nMC / (float) maxThreadsPerBlock)); isspas.fieldBlockSize = maxThreadsPerBlock; printf("Number of IS-SPA field kernel blocks: %d \n", isspas.fieldGridSize); printf("Number of IS-SPA field kernel threads per block: %d \n", isspas.fieldBlockSize); // determine gridSize and blockSize for force kernel temp = int(ceil((nAtoms_h*isspas.nMC) / (float) 32.0)); isspas.forceThreads = temp*32; isspas.forceGridSize = int(ceil(isspas.forceThreads*nAtoms_h / (float) maxThreadsPerBlock)); isspas.forceBlockSize = maxThreadsPerBlock; printf("Number of IS-SPA force kernel blocks: %d \n", isspas.forceGridSize); printf("Number of IS-SPA force kernel threads per block: %d \n", isspas.forceBlockSize); printf("Number of IS-SPA force kernel MC points: %d %d \n", isspas.forceThreads,nAtoms_h*isspas.nMC); // fill box with box and half box length box_h.x = lbox_h; box_h.y = lbox_h/2.0f; // set constant memory cudaMemcpyToSymbol(nMC, &isspas.nMC, sizeof(int)); cudaMemcpyToSymbol(nTypes, &isspas.nTypes, sizeof(int)); cudaMemcpyToSymbol(nRs, &isspas.nRs, sizeof(int)); cudaMemcpyToSymbol(nGRs, &isspas.nGRs, sizeof(int)); cudaMemcpyToSymbol(nERs, &isspas.nERs, sizeof(int)); cudaMemcpyToSymbol(nAtoms, &nAtoms_h, sizeof(int)); cudaMemcpyToSymbol(nPairs, &nPairs_h, sizeof(int)); cudaMemcpyToSymbol(box, &box_h, sizeof(float2)); cudaMemcpyToSymbol(forceRparams, &isspas.forceRparams, sizeof(float2)); cudaMemcpyToSymbol(gRparams, &isspas.gRparams, sizeof(float2)); cudaMemcpyToSymbol(eRparams, &isspas.eRparams, sizeof(float2)); }
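// --- Illustrative sketch (editor's addition, not part of the file pair above) ---
// The warp reductions in the IS-SPA kernels use the legacy __shfl_down intrinsic,
// which was deprecated in favour of the *_sync variants from CUDA 9 onwards. A
// minimal equivalent sum reduction is sketched below, assuming all 32 lanes of
// the warp participate.
__inline__ __device__ float warpReduceSumSync(float val) {
    for (int offset = warpSize / 2; offset > 0; offset /= 2)
        val += __shfl_down_sync(0xffffffff, val, offset);  // full-warp mask assumed
    return val;
}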
5f920a049fc35ba029f8ea011fd76e72bca1616d.hip
// !!! This is a file automatically generated by hipify!!!
// =================================================================
//
// File: intro5.cu
// Author: Pedro Perez
// Description: This file shows some of the basic CUDA directives.
//
// Copyright (c) 2020 by Tecnologico de Monterrey.
// All Rights Reserved. May be reproduced for any non-commercial
// purpose.
//
// =================================================================
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include "utils.h"

#define SIZE 512

__global__ void add(int *a, int *b, int *c) {
    c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}

int main(int argc, char* argv[]) {
    int *a, *b, *c;
    int *d_a, *d_b, *d_c;

    a = (int*) malloc(SIZE * sizeof(int));
    fill_array(a, SIZE);
    display_array("a", a);

    b = (int*) malloc(SIZE * sizeof(int));
    fill_array(b, SIZE);
    display_array("b", b);

    c = (int*) malloc(SIZE * sizeof(int));

    hipMalloc((void**) &d_a, SIZE * sizeof(int));
    hipMalloc((void**) &d_b, SIZE * sizeof(int));
    hipMalloc((void**) &d_c, SIZE * sizeof(int));

    hipMemcpy(d_a, a, SIZE * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, SIZE * sizeof(int), hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( add), dim3(1), dim3(SIZE), 0, 0, d_a, d_b, d_c);

    hipMemcpy(c, d_c, SIZE * sizeof(int), hipMemcpyDeviceToHost);
    display_array("c", c);

    hipFree(d_c);
    hipFree(d_b);
    hipFree(d_a);
    free(c);
    free(b);
    free(a);
    return 0;
}
5f920a049fc35ba029f8ea011fd76e72bca1616d.cu
// =================================================================
//
// File: intro5.cu
// Author: Pedro Perez
// Description: This file shows some of the basic CUDA directives.
//
// Copyright (c) 2020 by Tecnologico de Monterrey.
// All Rights Reserved. May be reproduced for any non-commercial
// purpose.
//
// =================================================================
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include "utils.h"

#define SIZE 512

__global__ void add(int *a, int *b, int *c) {
    c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}

int main(int argc, char* argv[]) {
    int *a, *b, *c;
    int *d_a, *d_b, *d_c;

    a = (int*) malloc(SIZE * sizeof(int));
    fill_array(a, SIZE);
    display_array("a", a);

    b = (int*) malloc(SIZE * sizeof(int));
    fill_array(b, SIZE);
    display_array("b", b);

    c = (int*) malloc(SIZE * sizeof(int));

    cudaMalloc((void**) &d_a, SIZE * sizeof(int));
    cudaMalloc((void**) &d_b, SIZE * sizeof(int));
    cudaMalloc((void**) &d_c, SIZE * sizeof(int));

    cudaMemcpy(d_a, a, SIZE * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, SIZE * sizeof(int), cudaMemcpyHostToDevice);

    add<<<1, SIZE>>>(d_a, d_b, d_c);

    cudaMemcpy(c, d_c, SIZE * sizeof(int), cudaMemcpyDeviceToHost);
    display_array("c", c);

    cudaFree(d_c);
    cudaFree(d_b);
    cudaFree(d_a);
    free(c);
    free(b);
    free(a);
    return 0;
}
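// --- Illustrative sketch (editor's addition, not part of the file pair above) ---
// The add kernel above relies on SIZE fitting into a single block of threads.
// A common generalization is a grid-stride loop, sketched here for an arbitrary
// element count n; add_grid_stride is a hypothetical name for illustration.
__global__ void add_grid_stride(const int* a, const int* b, int* c, int n) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < n;
         i += blockDim.x * gridDim.x) {
        c[i] = a[i] + b[i];
    }
}
// Possible launch: add_grid_stride<<<(n + 255) / 256, 256>>>(d_a, d_b, d_c, n);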
5bfd0502babd44a6048ab9f1d1092e497078724a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*** Calculating a derivative with CD ***/ #include <iostream> #include <fstream> #include <cmath> #include <sys/time.h> void checkErrors(char *label) { // we need to synchronise first to catch errors due to // asynchroneous operations that would otherwise // potentially go unnoticed hipError_t err; err = hipDeviceSynchronize(); if (err != hipSuccess) { char *e = (char*) hipGetErrorString(err); fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label); } err = hipGetLastError(); if (err != hipSuccess) { char *e = (char*) hipGetErrorString(err); fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label); } } double get_time() { struct timeval tim; hipDeviceSynchronize(); gettimeofday(&tim, NULL); return (double) tim.tv_sec+(tim.tv_usec/1000000.0); } texture<float, 2> tex_u; texture<float, 2> tex_u_prev; // GPU kernels __global__ void copy_kernel (float *u, float *u_prev, int N, int BSZ, int N_max) { // Setting up indices int i = threadIdx.x; int j = threadIdx.y; int x = i + blockIdx.x*BSZ; int y = j + blockIdx.y*BSZ; int I = x + y*N_max; //if (I>=N*N){return;} //if ((x>=N) || (y>=N)){return;} float value = tex2D(tex_u, x, y); u_prev[I] = value; } __global__ void update (float *u, float *u_prev, int N, float h, float dt, float alpha, int BSZ, int N_max) { // Setting up indices int i = threadIdx.x; int j = threadIdx.y; int x = i + blockIdx.x*BSZ; int y = j + blockIdx.y*BSZ; int I = x + y*N_max; //if (I>=N*N){return;} //if ((x>=N) || (y>=N)){return;} float t, b, r, l, c; c = tex2D(tex_u_prev, x, y); t = tex2D(tex_u_prev, x, y+1); b = tex2D(tex_u_prev, x, y-1); r = tex2D(tex_u_prev, x+1, y); l = tex2D(tex_u_prev, x-1, y); //if ( (I>N) && (I< N*N-1-N) && (I%N!=0) && (I%N!=N-1)) if ( (x!=0) && (y!=0) && (x!=N-1) && (y!=N-1)) { u[I] = c + alpha*dt/h/h * (t + b + l + r - 4*c); } } int main(int argc, char * const argv[]) { // Allocate in CPU int N; // For textures to work, N needs to be a multiple of int BLOCKSIZE; // 32. 
As I will be using BLOCKSIZE to be a multiple of 8 // I'll just look for the closest multiple of BLOCKSIZE (N_max) if (argc != 3) { fprintf(stderr, "You have to provide size(n) and blocksize as arguments.\n"); return -1; } char *p; N = strtoul(argv[1], &p, 10); BLOCKSIZE = strtoul(argv[2], &p, 10); int N_max = (int((N-0.5)/BLOCKSIZE) + 1) * BLOCKSIZE; float xmin = 0.0f; float xmax = 3.5f; float ymin = 0.0f; //float ymax = 2.0f; float h = (xmax-xmin)/(N-1); float dt = 0.00001f; float alpha = 0.645f; float time = 0.4f; int steps = (int)ceil(time/dt); int I, J; float *x = new float[N*N]; float *y = new float[N*N]; float *u = new float[N_max*N_max]; float *u_prev = new float[N*N]; // Initialize for (int j=0; j<N_max; j++) { for (int i=0; i<N_max; i++) { I = N_max*j + i; u[I] = 0.0f; if ( ((i==0) || (j==0)) && (j<N) && (i<N)) {u[I] = 200.0f;} } } // Generate mesh and intial condition for (int j=0; j<N; j++) { for (int i=0; i<N; i++) { I = N*j + i; x[I] = xmin + h*i; y[I] = ymin + h*j; } } // Allocate in GPU float *u_d, *u_prev_d; hipMalloc( (void**) &u_d, N_max*N_max*sizeof(float)); hipMalloc( (void**) &u_prev_d, N_max*N_max*sizeof(float)); // Bind textures hipChannelFormatDesc desc = hipCreateChannelDesc<float>(); hipBindTexture2D(NULL, tex_u, u_d, desc, N_max, N_max, sizeof(float)*N_max); hipBindTexture2D(NULL, tex_u_prev, u_prev_d, desc, N_max, N_max, sizeof(float)*N_max); // Copy to GPU hipMemcpy(u_d, u, N_max*N_max*sizeof(float), hipMemcpyHostToDevice); // Loop dim3 dimGrid(int((N_max-0.5)/BLOCKSIZE)+1, int((N_max-0.5)/BLOCKSIZE)+1); dim3 dimBlock(BLOCKSIZE, BLOCKSIZE); double start = get_time(); for (int t=0; t<steps; t++) { // The transfer of u to u_prev needs to be in separate kernel // as it's read only hipLaunchKernelGGL(( copy_kernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, u_d, u_prev_d, N, BLOCKSIZE, N_max); hipLaunchKernelGGL(( update) , dim3(dimGrid), dim3(dimBlock), 0, 0, u_d, u_prev_d, N, h, dt, alpha, BLOCKSIZE, N_max); } double stop = get_time(); checkErrors("update"); double elapsed = stop - start; printf("%d, %d, %f\n", N, BLOCKSIZE, elapsed); // Copy result back to host hipMemcpy(u, u_d, N_max*N_max*sizeof(float), hipMemcpyDeviceToHost); // std::ofstream temperature("temperature_texture.txt"); // for (int j=0; j<N; j++) // { for (int i=0; i<N; i++) // { I = N*j + i; // J = N_max*j + i; // // std::cout<<u[J]<<"\t"; // temperature<<x[I]<<"\t"<<y[I]<<"\t"<<u[J]<<std::endl; // } // temperature<<"\n"; // // std::cout<<std::endl; // } // temperature.close(); // Free device hipUnbindTexture(tex_u); hipUnbindTexture(tex_u_prev); hipFree(u_d); hipFree(u_prev_d); }
5bfd0502babd44a6048ab9f1d1092e497078724a.cu
/*** Calculating a derivative with CD ***/ #include <iostream> #include <fstream> #include <cmath> #include <sys/time.h> void checkErrors(char *label) { // we need to synchronise first to catch errors due to // asynchroneous operations that would otherwise // potentially go unnoticed cudaError_t err; err = cudaThreadSynchronize(); if (err != cudaSuccess) { char *e = (char*) cudaGetErrorString(err); fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label); } err = cudaGetLastError(); if (err != cudaSuccess) { char *e = (char*) cudaGetErrorString(err); fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label); } } double get_time() { struct timeval tim; cudaThreadSynchronize(); gettimeofday(&tim, NULL); return (double) tim.tv_sec+(tim.tv_usec/1000000.0); } texture<float, 2> tex_u; texture<float, 2> tex_u_prev; // GPU kernels __global__ void copy_kernel (float *u, float *u_prev, int N, int BSZ, int N_max) { // Setting up indices int i = threadIdx.x; int j = threadIdx.y; int x = i + blockIdx.x*BSZ; int y = j + blockIdx.y*BSZ; int I = x + y*N_max; //if (I>=N*N){return;} //if ((x>=N) || (y>=N)){return;} float value = tex2D(tex_u, x, y); u_prev[I] = value; } __global__ void update (float *u, float *u_prev, int N, float h, float dt, float alpha, int BSZ, int N_max) { // Setting up indices int i = threadIdx.x; int j = threadIdx.y; int x = i + blockIdx.x*BSZ; int y = j + blockIdx.y*BSZ; int I = x + y*N_max; //if (I>=N*N){return;} //if ((x>=N) || (y>=N)){return;} float t, b, r, l, c; c = tex2D(tex_u_prev, x, y); t = tex2D(tex_u_prev, x, y+1); b = tex2D(tex_u_prev, x, y-1); r = tex2D(tex_u_prev, x+1, y); l = tex2D(tex_u_prev, x-1, y); //if ( (I>N) && (I< N*N-1-N) && (I%N!=0) && (I%N!=N-1)) if ( (x!=0) && (y!=0) && (x!=N-1) && (y!=N-1)) { u[I] = c + alpha*dt/h/h * (t + b + l + r - 4*c); } } int main(int argc, char * const argv[]) { // Allocate in CPU int N; // For textures to work, N needs to be a multiple of int BLOCKSIZE; // 32. 
As I will be using BLOCKSIZE to be a multiple of 8 // I'll just look for the closest multiple of BLOCKSIZE (N_max) if (argc != 3) { fprintf(stderr, "You have to provide size(n) and blocksize as arguments.\n"); return -1; } char *p; N = strtoul(argv[1], &p, 10); BLOCKSIZE = strtoul(argv[2], &p, 10); int N_max = (int((N-0.5)/BLOCKSIZE) + 1) * BLOCKSIZE; float xmin = 0.0f; float xmax = 3.5f; float ymin = 0.0f; //float ymax = 2.0f; float h = (xmax-xmin)/(N-1); float dt = 0.00001f; float alpha = 0.645f; float time = 0.4f; int steps = (int)ceil(time/dt); int I, J; float *x = new float[N*N]; float *y = new float[N*N]; float *u = new float[N_max*N_max]; float *u_prev = new float[N*N]; // Initialize for (int j=0; j<N_max; j++) { for (int i=0; i<N_max; i++) { I = N_max*j + i; u[I] = 0.0f; if ( ((i==0) || (j==0)) && (j<N) && (i<N)) {u[I] = 200.0f;} } } // Generate mesh and intial condition for (int j=0; j<N; j++) { for (int i=0; i<N; i++) { I = N*j + i; x[I] = xmin + h*i; y[I] = ymin + h*j; } } // Allocate in GPU float *u_d, *u_prev_d; cudaMalloc( (void**) &u_d, N_max*N_max*sizeof(float)); cudaMalloc( (void**) &u_prev_d, N_max*N_max*sizeof(float)); // Bind textures cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>(); cudaBindTexture2D(NULL, tex_u, u_d, desc, N_max, N_max, sizeof(float)*N_max); cudaBindTexture2D(NULL, tex_u_prev, u_prev_d, desc, N_max, N_max, sizeof(float)*N_max); // Copy to GPU cudaMemcpy(u_d, u, N_max*N_max*sizeof(float), cudaMemcpyHostToDevice); // Loop dim3 dimGrid(int((N_max-0.5)/BLOCKSIZE)+1, int((N_max-0.5)/BLOCKSIZE)+1); dim3 dimBlock(BLOCKSIZE, BLOCKSIZE); double start = get_time(); for (int t=0; t<steps; t++) { // The transfer of u to u_prev needs to be in separate kernel // as it's read only copy_kernel <<<dimGrid, dimBlock>>> (u_d, u_prev_d, N, BLOCKSIZE, N_max); update <<<dimGrid, dimBlock>>> (u_d, u_prev_d, N, h, dt, alpha, BLOCKSIZE, N_max); } double stop = get_time(); checkErrors("update"); double elapsed = stop - start; printf("%d, %d, %f\n", N, BLOCKSIZE, elapsed); // Copy result back to host cudaMemcpy(u, u_d, N_max*N_max*sizeof(float), cudaMemcpyDeviceToHost); // std::ofstream temperature("temperature_texture.txt"); // for (int j=0; j<N; j++) // { for (int i=0; i<N; i++) // { I = N*j + i; // J = N_max*j + i; // // std::cout<<u[J]<<"\t"; // temperature<<x[I]<<"\t"<<y[I]<<"\t"<<u[J]<<std::endl; // } // temperature<<"\n"; // // std::cout<<std::endl; // } // temperature.close(); // Free device cudaUnbindTexture(tex_u); cudaUnbindTexture(tex_u_prev); cudaFree(u_d); cudaFree(u_prev_d); }
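// --- Illustrative sketch (editor's addition, not part of the file pair above) ---
// The heat solver above binds 2D texture *references* (texture<float, 2>), an API
// removed in recent CUDA releases. A texture-object equivalent for the row-major
// float field of width/height N_max is sketched below; make_u_texture is a
// hypothetical helper, and the pitch of N_max * sizeof(float) bytes is assumed to
// satisfy the device's texture pitch alignment.
#include <cuda_runtime.h>

static cudaTextureObject_t make_u_texture(float* d_u, int N_max) {
    cudaResourceDesc resDesc = {};
    resDesc.resType = cudaResourceTypePitch2D;
    resDesc.res.pitch2D.devPtr = d_u;
    resDesc.res.pitch2D.desc = cudaCreateChannelDesc<float>();
    resDesc.res.pitch2D.width = N_max;
    resDesc.res.pitch2D.height = N_max;
    resDesc.res.pitch2D.pitchInBytes = N_max * sizeof(float);

    cudaTextureDesc texDesc = {};
    texDesc.addressMode[0] = cudaAddressModeClamp;
    texDesc.addressMode[1] = cudaAddressModeClamp;
    texDesc.filterMode = cudaFilterModePoint;
    texDesc.readMode = cudaReadModeElementType;

    cudaTextureObject_t tex = 0;
    cudaCreateTextureObject(&tex, &resDesc, &texDesc, nullptr);
    return tex;
}
// Inside a kernel the fetch becomes: float c = tex2D<float>(tex, x, y);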
a378fea15f027c047c005860a80c10b2850b0f7e.hip
// !!! This is a file automatically generated by hipify!!! /* * AES Encryption Utility implemented in CUDA * Author: Zorawar Moolenaar <[email protected]> * * usage: * 1. generate random key using `make key` * 2. generate random payload using `make payload` * 3. compile using `make cuda_compile` * 4. GPU encrypt payload using `make cuda_enc` * 5. CPU encrypt payload using `make cuda_ssl` * * ## Algorithm Pseudocode ## * The steps of AES encryption (highlited in the acommpanying paper) are based on pseudocode and explanation from a variety of sources including: """ Announcing the Advanced Encryption Standard (AES) by the National Institute of Standards and Technology (2001) """ and """ Viability study of the CUDA technology for the acceleration of processes by Snchez Castellano, Rubn (2012) """ * Other resources have been cited in the accompanying paper. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <hip/hip_runtime.h> #include "aes.h" __constant__ byte d_SBOX[256]; #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert (hipError_t code, const char *file, int line, bool abort = true) { if (code != hipSuccess) { fprintf (stderr, "GPUassert: %s %s %d\n", hipGetErrorString (code), file, line); if (abort) exit (code); } } // Expanded key array definition. Its length depends on the round numbers byte ExpandKey[60][4]; __global__ void AESEncrypt (byte* gpuBuffer, byte* key, int nStates) { __shared__ byte State[4][4]; int index = blockIdx.x * blockDim.x + threadIdx.x, i, round; if (index < nStates) { // Every thread processes a stream of 16 bytes // i.e. encrypts a block of 128bits of data // This 128-bit data is brought into the shared memory to provide extra speedup for (i = 0; i < 16; i++) State[i / 4][i % 4] = gpuBuffer[i * 16 + i]; // Add the per round Key AddRoundKey (State, 0, key); //For N-1 rounds, perform all four steps of the algorithm for (round = 1; round < ROUNDS; round++) { SubBytes (State); ShiftRows (State); MixColumns (State); AddRoundKey (State, round, key); } // For last round of AES Encryption, skip MixColumns SubBytes (State); ShiftRows (State); AddRoundKey (State, round, key); // Copy encrypted data back to global memory for (i = 0; i < 16; i++) gpuBuffer[index * 16 + i] = State[i / 4][i % 4]; } } int main (int argc, char** argv) { if (argc != 4) { printUsage(); return EXIT_FAILURE; } // data path is stored in this buffer byte *buffer; buffer = (byte*)malloc (MAX_BUFFER_LENGTH); int bytesRead, bytesWritten, returnCode; // Number of state matrix stored on the buffer unsigned long statesInBuffer = 0, processedBytes = 0; char pretty[20]; FILE *payload, *target, *keyFile; byte Key[16]; byte *d_payload, *d_key; size_t maxSz = sizeof(byte) * MAX_BUFFER_LENGTH, expSz = sizeof(byte) * KEY_EXPONENT; ///////////// // OPEN FILES // READ KEY // EXPAND KEY ///////////// prepareFiles (&payload, &target, &keyFile, argv[1], argv[2], argv[3]); returnCode = readKey (keyFile, Key); if (returnCode == EXIT_FAILURE) terminate (payload, target, argv[2], "Key is not 16 bytes. 
Terminating."); keyExpansion (Key); //////////////////// -- CUDA PLUMBING // allocate memory for key // allocate memory for payload // allocate constant memory for SBOX // create event timers ///////////////////////////////////// gpuErrchk (hipMalloc ((void**) &d_payload, maxSz)); gpuErrchk (hipMalloc ((void**) &d_key, expSz)); gpuErrchk ( hipMemcpy (d_key, ExpandKey, expSz, hipMemcpyHostToDevice)); hipMemcpyToSymbol (d_SBOX, SBOX, 256) ; hipEvent_t start, stop; hipEventCreate (&start); hipEventCreate (&stop); float totalElapsedTime = 0.0; float elapsedtime; /////////////////////////// // Load payload into buffer // Run Kernel to Encrypt // Flush buffer to file /////////////////////////// printf ("Encrypting in Segments\n"); bytesRead = populateBuffer (payload, buffer); while (bytesRead > 0) { statesInBuffer = bytesRead / STATE_SIZE; gpuErrchk (hipMemcpy (d_payload, buffer, maxSz, hipMemcpyHostToDevice)); hipEventRecord (start, 0); elapsedtime = 0.0; dim3 nBlocks(1<<15); dim3 nThreads(1<<10); hipLaunchKernelGGL(( AESEncrypt) , dim3(nBlocks), dim3(nThreads), 0, 0, d_payload, d_key, statesInBuffer); gpuErrchk(hipPeekAtLastError() ); hipEventRecord (stop, 0); hipEventSynchronize (stop); hipEventElapsedTime (&elapsedtime, start, stop); totalElapsedTime += elapsedtime; gpuErrchk(hipMemcpy (buffer, d_payload, maxSz, hipMemcpyDeviceToHost)); prettyPrint(bytesRead, pretty); printf ("...%s processed in this segment\n", pretty); bytesWritten = flushBuffer (buffer, statesInBuffer, target); if (bytesWritten < statesInBuffer) { terminate (payload, target, argv[2], "Error writing the buffer on the output file" ); } processedBytes += bytesRead; bytesRead = populateBuffer (payload, buffer); } ////////////// // Clean-up // Print Stats ////////////// hipEventDestroy (start); hipEventDestroy (stop); prettyPrint(processedBytes, pretty); printf ("Encrypted %s of data using %2.4fs of GPU compute time.\n", pretty, totalElapsedTime/1000); hipFree (d_payload); hipFree (d_key); fclose (payload); fclose (target); return EXIT_SUCCESS; } /*** * AES Utilities ***/ void keyExpansion (byte *key) { byte temp[4]; int word_it; // copy the entire key to a more accessible place memcpy (ExpandKey, key, KEY_SIZE_BYTES); ////////////////////////////////////////// // ROTATE EACH WORD // SUSBTITUTE EACH WORD // XOR W[i-1] with round constant, Rcon[i] ////////////////////////////////////////// for (int Round_it = KEY_EXPONENT; Round_it < (ROUNDS + 1) * 4; Round_it++) { // Get the latest word memcpy (temp, ExpandKey[Round_it - 1], 4); rotateWord (temp, 1); for (word_it = 0; word_it < 4; ++word_it) temp[word_it] = SBOX[ (((temp[word_it] & 0xf0) >> 4) * 16) + (temp[word_it] & 0x0f)]; temp[0] ^= Rcon[Round_it / KEY_EXPONENT]; for (word_it = 0; word_it < 4; word_it++) temp[word_it] ^= ExpandKey[Round_it - KEY_EXPONENT][word_it]; // Save the expanded key segment memcpy (ExpandKey[Round_it], temp, 4); } } __host__ __device__ void rotateWord (byte* word, byte rotationCount) { int i; byte original[4]; for (i = 0; i < 4; ++i) original[i] = word[i]; ///////////////////////////////////////////// // 1. Shift (wrap) row 1 to the left 0 times // 2. Shift (wrap) row 2 to the left 1 time // 3. Shift (wrap) row 3 to the left 2 time // 4. 
Shift (wrap) row 4 to the left 3 time ///////////////////////////////////////////// for (i = 0; i < 4; i++) word[i] = original[ (i + rotationCount) % 4]; } __device__ void AddRoundKey (byte State[4][4], byte round, byte* key) { /////// // Add a round key to each element of the State using XOR /////// for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) State[i][j] ^= key[ (round * 16) + (i * 4) + j]; } } __device__ void SubBytes (byte State[4][4]) { int i, j, row, col; //////////////////////////////////////////////////////// // 1. Extract the first byte of the State Element // 2. Extract the second byte of the State Element // 3. Use the first and second byte and row and col to // find the appropriate subtituting element /////////////////////////////////////////////////////// for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { row = (State[i][j] & 0xf0) / STATE_SIZE; col = State[i][j] & 0x0f; State[i][j] = d_SBOX[row * STATE_SIZE + col]; } } } __device__ void ShiftRows (byte State[4][4]) { int i, j; byte word[4]; ///////////////////////////////////////////// // 0. Row 1 is not shifted // 1. Shift (wrap) row 2 to the left 1 time // 2. Shift (wrap) row 3 to the left 2 time // 3. Shift (wrap) row 4 to the left 3 time ///////////////////////////////////////////// for (j = 1; j < 4; ++j) { for (i = 0; i < 4; ++i) word[i] = State[i][j]; rotateWord (word, j); for (i = 0; i < 4; ++i) State[i][j] = word[i]; } } __device__ void MixColumns (byte State[4][4]) { int row, col; byte tmp[4]; ////////////////////////////////////////////////////////////////////// // This is some complex galois field operation, but it // essentially boils down to matrix multiplication by a constant matrix ////////////////////////////////////////////////////////////////////// for (row = 0; row < 4; ++row) { tmp[0] = GM2[State[row][0]] ^ GM3[State[row][1]] ^ State[row][2] ^ State[row][3]; tmp[1] = State[row][0] ^ GM2[State[row][1]] ^ GM3[State[row][2]] ^ State[row][3]; tmp[2] = State[row][0] ^ State[row][1] ^ GM2[State[row][2]] ^ GM3[State[row][3]]; tmp[3] = GM3[State[row][0]] ^ State[row][1] ^ State[row][2] ^ GM2[State[row][3]]; for (col = 0; col < 4; ++col) State[row][col] = tmp[col]; } } /** * IO utilities */ void prepareFiles (FILE** payload, FILE** target, FILE** keyFile, char* payloadName, char* targetName, char* keyFilename) { printf ("\nOpening files...\n"); *keyFile = fopen (keyFilename, "rb"); if (*keyFile == NULL) { fprintf (stderr, "Could not open keyfile \"%s\"\n", keyFilename); exit (EXIT_FAILURE); } else printf ("Keyfile \"%s\" is ready.\n", keyFilename); *payload = fopen (payloadName, "rb"); if (*payload == NULL) { fprintf (stderr, "Could not open payload \"%s\"\n", payloadName); fclose (*keyFile); exit (EXIT_FAILURE); } else printf ("Keyfile \"%s\" is ready.\n", payloadName); if (strcmp (payloadName, targetName)) *target = fopen (targetName, "wb"); else { printf ("Target file cannot be the same as payload. 
Writing to \n: \"%s.out\"\n", targetName); *target = fopen (strcat (targetName, ".out"), "wb"); } if (target == NULL) { fprintf (stderr, "Error creating writable target\n"); fclose (*payload); fclose (*keyFile); exit (EXIT_FAILURE); } else printf ("Target file \"%s\" is ready\n", targetName); } int readKey (FILE *keyFile, byte key[16]) { int bytesRead; bytesRead = fread (key, 1, KEY_SIZE_BYTES, keyFile); printf ("\nReading 16-bytes of the given key...\n"); if (bytesRead != KEY_SIZE_BYTES) { printf("Your key is %d bytes not %d per the AES Spec.\n", bytesRead, KEY_SIZE_BYTES); return EXIT_FAILURE; } printf ("Key successfully loaded:\n"); printKey (key); printf ("Closing KeyFile...\n\n"); fclose (keyFile); return EXIT_SUCCESS; } int populateBuffer (FILE* payload, byte *buffer) { int bytesRead = 0; // return if file is empty if (feof (payload)) return bytesRead; bytesRead = fread (buffer, 1, MAX_BUFFER_LENGTH, payload); char pretty[20]; prettyPrint(bytesRead, pretty); printf ("...Buffer contains %s\n", pretty); return bytesRead; } int flushBuffer (byte outBuffer[], int statesInBuffer, FILE * target) { int bytesWritten; bytesWritten = fwrite (outBuffer, 1, statesInBuffer * STATE_SIZE, target); char pretty[20]; prettyPrint(bytesWritten, pretty); printf ("...Written %s to ouput stream\n\n", pretty); return bytesWritten; } void terminate (FILE* payload, FILE* target, char* targetName, const char* msg) { fprintf (stderr, "\n%s\n", msg); fclose (payload); fclose (target); remove (targetName); exit (EXIT_FAILURE); } /** * Print utilities */ void printUsage() { printf ("./AESencrypt <input_file> <output_file> <key_file>'\n"); } void printStateMatrix (byte state[4][4]) { for (int i = 0; i < 4; ++i) printf ("%02x %02x %02x %02x\n", state[i][0], state[i][1], state[i][2], state[i][3]); printf ("\n"); } void printKey (byte key[16]) { for (int i = 0; i < KEY_SIZE_BYTES; i++) printf ("%02x ", key[i]); printf ("\n"); } void prettyPrint (unsigned long bytes, char result[20]) { if (bytes > (1 << 20)) sprintf (result, "%lu MiB", bytes / (1 << 20)); else if (bytes > (1 << 10)) sprintf (result, "%lu KiB", bytes / (1 << 10)); else if (bytes > 0) sprintf (result, "%lu B", bytes); else sprintf (result, "): Zero Bytes :("); }
a378fea15f027c047c005860a80c10b2850b0f7e.cu
/* * AES Encryption Utility implemented in CUDA * Author: Zorawar Moolenaar <[email protected]> * * usage: * 1. generate random key using `make key` * 2. generate random payload using `make payload` * 3. compile using `make cuda_compile` * 4. GPU encrypt payload using `make cuda_enc` * 5. CPU encrypt payload using `make cuda_ssl` * * ## Algorithm Pseudocode ## * The steps of AES encryption (highlited in the acommpanying paper) are based on pseudocode and explanation from a variety of sources including: """ Announcing the Advanced Encryption Standard (AES) by the National Institute of Standards and Technology (2001) """ and """ Viability study of the CUDA technology for the acceleration of processes by Sánchez Castellano, Rubén (2012) """ * Other resources have been cited in the accompanying paper. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <cuda.h> #include "aes.h" __constant__ byte d_SBOX[256]; #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert (cudaError_t code, const char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf (stderr, "GPUassert: %s %s %d\n", cudaGetErrorString (code), file, line); if (abort) exit (code); } } // Expanded key array definition. Its length depends on the round numbers byte ExpandKey[60][4]; __global__ void AESEncrypt (byte* gpuBuffer, byte* key, int nStates) { __shared__ byte State[4][4]; int index = blockIdx.x * blockDim.x + threadIdx.x, i, round; if (index < nStates) { // Every thread processes a stream of 16 bytes // i.e. encrypts a block of 128bits of data // This 128-bit data is brought into the shared memory to provide extra speedup for (i = 0; i < 16; i++) State[i / 4][i % 4] = gpuBuffer[i * 16 + i]; // Add the per round Key AddRoundKey (State, 0, key); //For N-1 rounds, perform all four steps of the algorithm for (round = 1; round < ROUNDS; round++) { SubBytes (State); ShiftRows (State); MixColumns (State); AddRoundKey (State, round, key); } // For last round of AES Encryption, skip MixColumns SubBytes (State); ShiftRows (State); AddRoundKey (State, round, key); // Copy encrypted data back to global memory for (i = 0; i < 16; i++) gpuBuffer[index * 16 + i] = State[i / 4][i % 4]; } } int main (int argc, char** argv) { if (argc != 4) { printUsage(); return EXIT_FAILURE; } // data path is stored in this buffer byte *buffer; buffer = (byte*)malloc (MAX_BUFFER_LENGTH); int bytesRead, bytesWritten, returnCode; // Number of state matrix stored on the buffer unsigned long statesInBuffer = 0, processedBytes = 0; char pretty[20]; FILE *payload, *target, *keyFile; byte Key[16]; byte *d_payload, *d_key; size_t maxSz = sizeof(byte) * MAX_BUFFER_LENGTH, expSz = sizeof(byte) * KEY_EXPONENT; ///////////// // OPEN FILES // READ KEY // EXPAND KEY ///////////// prepareFiles (&payload, &target, &keyFile, argv[1], argv[2], argv[3]); returnCode = readKey (keyFile, Key); if (returnCode == EXIT_FAILURE) terminate (payload, target, argv[2], "Key is not 16 bytes. 
Terminating."); keyExpansion (Key); //////////////////// -- CUDA PLUMBING // allocate memory for key // allocate memory for payload // allocate constant memory for SBOX // create event timers ///////////////////////////////////// gpuErrchk (cudaMalloc ((void**) &d_payload, maxSz)); gpuErrchk (cudaMalloc ((void**) &d_key, expSz)); gpuErrchk ( cudaMemcpy (d_key, ExpandKey, expSz, cudaMemcpyHostToDevice)); cudaMemcpyToSymbol (d_SBOX, SBOX, 256) ; cudaEvent_t start, stop; cudaEventCreate (&start); cudaEventCreate (&stop); float totalElapsedTime = 0.0; float elapsedtime; /////////////////////////// // Load payload into buffer // Run Kernel to Encrypt // Flush buffer to file /////////////////////////// printf ("Encrypting in Segments\n"); bytesRead = populateBuffer (payload, buffer); while (bytesRead > 0) { statesInBuffer = bytesRead / STATE_SIZE; gpuErrchk (cudaMemcpy (d_payload, buffer, maxSz, cudaMemcpyHostToDevice)); cudaEventRecord (start, 0); elapsedtime = 0.0; dim3 nBlocks(1<<15); dim3 nThreads(1<<10); AESEncrypt <<<nBlocks, nThreads>>> (d_payload, d_key, statesInBuffer); gpuErrchk(cudaPeekAtLastError() ); cudaEventRecord (stop, 0); cudaEventSynchronize (stop); cudaEventElapsedTime (&elapsedtime, start, stop); totalElapsedTime += elapsedtime; gpuErrchk(cudaMemcpy (buffer, d_payload, maxSz, cudaMemcpyDeviceToHost)); prettyPrint(bytesRead, pretty); printf ("...%s processed in this segment\n", pretty); bytesWritten = flushBuffer (buffer, statesInBuffer, target); if (bytesWritten < statesInBuffer) { terminate (payload, target, argv[2], "Error writing the buffer on the output file" ); } processedBytes += bytesRead; bytesRead = populateBuffer (payload, buffer); } ////////////// // Clean-up // Print Stats ////////////// cudaEventDestroy (start); cudaEventDestroy (stop); prettyPrint(processedBytes, pretty); printf ("Encrypted %s of data using %2.4fs of GPU compute time.\n", pretty, totalElapsedTime/1000); cudaFree (d_payload); cudaFree (d_key); fclose (payload); fclose (target); return EXIT_SUCCESS; } /*** * AES Utilities ***/ void keyExpansion (byte *key) { byte temp[4]; int word_it; // copy the entire key to a more accessible place memcpy (ExpandKey, key, KEY_SIZE_BYTES); ////////////////////////////////////////// // ROTATE EACH WORD // SUSBTITUTE EACH WORD // XOR W[i-1] with round constant, Rcon[i] ////////////////////////////////////////// for (int Round_it = KEY_EXPONENT; Round_it < (ROUNDS + 1) * 4; Round_it++) { // Get the latest word memcpy (temp, ExpandKey[Round_it - 1], 4); rotateWord (temp, 1); for (word_it = 0; word_it < 4; ++word_it) temp[word_it] = SBOX[ (((temp[word_it] & 0xf0) >> 4) * 16) + (temp[word_it] & 0x0f)]; temp[0] ^= Rcon[Round_it / KEY_EXPONENT]; for (word_it = 0; word_it < 4; word_it++) temp[word_it] ^= ExpandKey[Round_it - KEY_EXPONENT][word_it]; // Save the expanded key segment memcpy (ExpandKey[Round_it], temp, 4); } } __host__ __device__ void rotateWord (byte* word, byte rotationCount) { int i; byte original[4]; for (i = 0; i < 4; ++i) original[i] = word[i]; ///////////////////////////////////////////// // 1. Shift (wrap) row 1 to the left 0 times // 2. Shift (wrap) row 2 to the left 1 time // 3. Shift (wrap) row 3 to the left 2 time // 4. 
Shift (wrap) row 4 to the left 3 time ///////////////////////////////////////////// for (i = 0; i < 4; i++) word[i] = original[ (i + rotationCount) % 4]; } __device__ void AddRoundKey (byte State[4][4], byte round, byte* key) { /////// // Add a round key to each element of the State using XOR /////// for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) State[i][j] ^= key[ (round * 16) + (i * 4) + j]; } } __device__ void SubBytes (byte State[4][4]) { int i, j, row, col; //////////////////////////////////////////////////////// // 1. Extract the first byte of the State Element // 2. Extract the second byte of the State Element // 3. Use the first and second byte and row and col to // find the appropriate subtituting element /////////////////////////////////////////////////////// for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { row = (State[i][j] & 0xf0) / STATE_SIZE; col = State[i][j] & 0x0f; State[i][j] = d_SBOX[row * STATE_SIZE + col]; } } } __device__ void ShiftRows (byte State[4][4]) { int i, j; byte word[4]; ///////////////////////////////////////////// // 0. Row 1 is not shifted // 1. Shift (wrap) row 2 to the left 1 time // 2. Shift (wrap) row 3 to the left 2 time // 3. Shift (wrap) row 4 to the left 3 time ///////////////////////////////////////////// for (j = 1; j < 4; ++j) { for (i = 0; i < 4; ++i) word[i] = State[i][j]; rotateWord (word, j); for (i = 0; i < 4; ++i) State[i][j] = word[i]; } } __device__ void MixColumns (byte State[4][4]) { int row, col; byte tmp[4]; ////////////////////////////////////////////////////////////////////// // This is some complex galois field operation, but it // essentially boils down to matrix multiplication by a constant matrix ////////////////////////////////////////////////////////////////////// for (row = 0; row < 4; ++row) { tmp[0] = GM2[State[row][0]] ^ GM3[State[row][1]] ^ State[row][2] ^ State[row][3]; tmp[1] = State[row][0] ^ GM2[State[row][1]] ^ GM3[State[row][2]] ^ State[row][3]; tmp[2] = State[row][0] ^ State[row][1] ^ GM2[State[row][2]] ^ GM3[State[row][3]]; tmp[3] = GM3[State[row][0]] ^ State[row][1] ^ State[row][2] ^ GM2[State[row][3]]; for (col = 0; col < 4; ++col) State[row][col] = tmp[col]; } } /** * IO utilities */ void prepareFiles (FILE** payload, FILE** target, FILE** keyFile, char* payloadName, char* targetName, char* keyFilename) { printf ("\nOpening files...\n"); *keyFile = fopen (keyFilename, "rb"); if (*keyFile == NULL) { fprintf (stderr, "Could not open keyfile \"%s\"\n", keyFilename); exit (EXIT_FAILURE); } else printf ("Keyfile \"%s\" is ready.\n", keyFilename); *payload = fopen (payloadName, "rb"); if (*payload == NULL) { fprintf (stderr, "Could not open payload \"%s\"\n", payloadName); fclose (*keyFile); exit (EXIT_FAILURE); } else printf ("Keyfile \"%s\" is ready.\n", payloadName); if (strcmp (payloadName, targetName)) *target = fopen (targetName, "wb"); else { printf ("Target file cannot be the same as payload. 
Writing to \n: \"%s.out\"\n", targetName); *target = fopen (strcat (targetName, ".out"), "wb"); } if (*target == NULL) { fprintf (stderr, "Error creating writable target\n"); fclose (*payload); fclose (*keyFile); exit (EXIT_FAILURE); } else printf ("Target file \"%s\" is ready\n", targetName); } int readKey (FILE *keyFile, byte key[16]) { int bytesRead; bytesRead = fread (key, 1, KEY_SIZE_BYTES, keyFile); printf ("\nReading 16 bytes of the given key...\n"); if (bytesRead != KEY_SIZE_BYTES) { printf("Your key is %d bytes not %d per the AES Spec.\n", bytesRead, KEY_SIZE_BYTES); return EXIT_FAILURE; } printf ("Key successfully loaded:\n"); printKey (key); printf ("Closing KeyFile...\n\n"); fclose (keyFile); return EXIT_SUCCESS; } int populateBuffer (FILE* payload, byte *buffer) { int bytesRead = 0; // return if file is empty if (feof (payload)) return bytesRead; bytesRead = fread (buffer, 1, MAX_BUFFER_LENGTH, payload); char pretty[20]; prettyPrint(bytesRead, pretty); printf ("...Buffer contains %s\n", pretty); return bytesRead; } int flushBuffer (byte outBuffer[], int statesInBuffer, FILE * target) { int bytesWritten; bytesWritten = fwrite (outBuffer, 1, statesInBuffer * STATE_SIZE, target); char pretty[20]; prettyPrint(bytesWritten, pretty); printf ("...Written %s to output stream\n\n", pretty); return bytesWritten; } void terminate (FILE* payload, FILE* target, char* targetName, const char* msg) { fprintf (stderr, "\n%s\n", msg); fclose (payload); fclose (target); remove (targetName); exit (EXIT_FAILURE); } /** * Print utilities */ void printUsage() { printf ("./AESencrypt <input_file> <output_file> <key_file>\n"); } void printStateMatrix (byte state[4][4]) { for (int i = 0; i < 4; ++i) printf ("%02x %02x %02x %02x\n", state[i][0], state[i][1], state[i][2], state[i][3]); printf ("\n"); } void printKey (byte key[16]) { for (int i = 0; i < KEY_SIZE_BYTES; i++) printf ("%02x ", key[i]); printf ("\n"); } void prettyPrint (unsigned long bytes, char result[20]) { if (bytes > (1 << 20)) sprintf (result, "%lu MiB", bytes / (1 << 20)); else if (bytes > (1 << 10)) sprintf (result, "%lu KiB", bytes / (1 << 10)); else if (bytes > 0) sprintf (result, "%lu B", bytes); else sprintf (result, "): Zero Bytes :("); }
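// ---------------------------------------------------------------------------
// Editorial illustration (not part of the original utility): a minimal sketch
// of the buffer-to-state bookkeeping that main() and AESEncrypt() agree on.
// It assumes the same aes.h constants used above (STATE_SIZE == 16 bytes per
// AES block); the helper name demoLaunchGeometry is invented for this sketch
// and is not called anywhere.
static void demoLaunchGeometry (unsigned long bytesRead)
{
    // One thread encrypts one 16-byte state, so a buffer of bytesRead bytes
    // yields bytesRead / STATE_SIZE states.
    unsigned long statesInBuffer = bytesRead / STATE_SIZE;

    // main() launches a fixed grid of (1<<15) blocks x (1<<10) threads and
    // relies on the "index < nStates" guard inside the kernel; this is the
    // tight grid that would cover the same states exactly once.
    unsigned long threadsPerBlock = 1 << 10;
    unsigned long blocksNeeded = (statesInBuffer + threadsPerBlock - 1) / threadsPerBlock;

    printf ("%lu bytes -> %lu states -> %lu block(s) of %lu threads\n",
            bytesRead, statesInBuffer, blocksNeeded, threadsPerBlock);
}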
80ca4f688cee8bbd7db93bcbe9c8674edd2bef15.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "knn/knn.cu" #include <vector> #include <gtest/gtest.h> #include <cuda_utils.h> #include <test_utils.h> #include <iostream> namespace ML { using namespace MLCommon; /** * * NOTE: Not exhaustively testing the kNN implementation since * we are using FAISS for this. Just testing API to verify the * knn.cu class is accepting inputs and providing outputs as * expected. */ template<typename T> class KNN_MGTest: public ::testing::Test { protected: void basicTest() { // Allocate input hipSetDevice(0); allocate(d_train_inputs_dev1, n * d); hipSetDevice(1); allocate(d_train_inputs_dev2, n * d); // Allocate reference arrays allocate<long>(d_ref_I, n*n); allocate(d_ref_D, n*n); // Allocate predicted arrays allocate<long>(d_pred_I, n*n); allocate(d_pred_D, n*n); // make test data on host std::vector<T> h_train_inputs = {1.0, 50.0, 51.0}; h_train_inputs.resize(n); updateDevice(d_train_inputs_dev1, h_train_inputs.data(), n*d); updateDevice(d_train_inputs_dev2, h_train_inputs.data(), n*d); std::vector<T> h_res_D = { 0.0, 0.0, 2401.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0 }; h_res_D.resize(n*n); updateDevice(d_ref_D, h_res_D.data(), n*n); std::vector<long> h_res_I = { 0, 3, 1, 1, 4, 2, 2, 5, 1 }; h_res_I.resize(n*n); updateDevice<long>(d_ref_I, h_res_I.data(), n*n); params[0] = { d_train_inputs_dev1, n }; params[1] = { d_train_inputs_dev2, n }; hipSetDevice(0); knn->fit(params, 2); knn->search(d_train_inputs_dev1, n, d_pred_I, d_pred_D, n); } void SetUp() override { basicTest(); } void TearDown() override { CUDA_CHECK(hipFree(d_train_inputs_dev1)); CUDA_CHECK(hipFree(d_train_inputs_dev2)); CUDA_CHECK(hipFree(d_pred_I)); CUDA_CHECK(hipFree(d_pred_D)); CUDA_CHECK(hipFree(d_ref_I)); CUDA_CHECK(hipFree(d_ref_D)); } protected: T* d_train_inputs_dev1; T* d_train_inputs_dev2; kNNParams *params = new kNNParams[2]; int n = 3; int d = 1; long *d_pred_I; T* d_pred_D; long *d_ref_I; T* d_ref_D; kNN *knn = new kNN(d); }; typedef KNN_MGTest<float> KNNTestF; TEST_F(KNNTestF, Fit) { ASSERT_TRUE( devArrMatch(d_ref_D, d_pred_D, n*n, Compare<float>())); ASSERT_TRUE( devArrMatch(d_ref_I, d_pred_I, n*n, Compare<long>())); } } // end namespace ML
80ca4f688cee8bbd7db93bcbe9c8674edd2bef15.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "knn/knn.cu" #include <vector> #include <gtest/gtest.h> #include <cuda_utils.h> #include <test_utils.h> #include <iostream> namespace ML { using namespace MLCommon; /** * * NOTE: Not exhaustively testing the kNN implementation since * we are using FAISS for this. Just testing API to verify the * knn.cu class is accepting inputs and providing outputs as * expected. */ template<typename T> class KNN_MGTest: public ::testing::Test { protected: void basicTest() { // Allocate input cudaSetDevice(0); allocate(d_train_inputs_dev1, n * d); cudaSetDevice(1); allocate(d_train_inputs_dev2, n * d); // Allocate reference arrays allocate<long>(d_ref_I, n*n); allocate(d_ref_D, n*n); // Allocate predicted arrays allocate<long>(d_pred_I, n*n); allocate(d_pred_D, n*n); // make test data on host std::vector<T> h_train_inputs = {1.0, 50.0, 51.0}; h_train_inputs.resize(n); updateDevice(d_train_inputs_dev1, h_train_inputs.data(), n*d); updateDevice(d_train_inputs_dev2, h_train_inputs.data(), n*d); std::vector<T> h_res_D = { 0.0, 0.0, 2401.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0 }; h_res_D.resize(n*n); updateDevice(d_ref_D, h_res_D.data(), n*n); std::vector<long> h_res_I = { 0, 3, 1, 1, 4, 2, 2, 5, 1 }; h_res_I.resize(n*n); updateDevice<long>(d_ref_I, h_res_I.data(), n*n); params[0] = { d_train_inputs_dev1, n }; params[1] = { d_train_inputs_dev2, n }; cudaSetDevice(0); knn->fit(params, 2); knn->search(d_train_inputs_dev1, n, d_pred_I, d_pred_D, n); } void SetUp() override { basicTest(); } void TearDown() override { CUDA_CHECK(cudaFree(d_train_inputs_dev1)); CUDA_CHECK(cudaFree(d_train_inputs_dev2)); CUDA_CHECK(cudaFree(d_pred_I)); CUDA_CHECK(cudaFree(d_pred_D)); CUDA_CHECK(cudaFree(d_ref_I)); CUDA_CHECK(cudaFree(d_ref_D)); } protected: T* d_train_inputs_dev1; T* d_train_inputs_dev2; kNNParams *params = new kNNParams[2]; int n = 3; int d = 1; long *d_pred_I; T* d_pred_D; long *d_ref_I; T* d_ref_D; kNN *knn = new kNN(d); }; typedef KNN_MGTest<float> KNNTestF; TEST_F(KNNTestF, Fit) { ASSERT_TRUE( devArrMatch(d_ref_D, d_pred_D, n*n, Compare<float>())); ASSERT_TRUE( devArrMatch(d_ref_I, d_pred_I, n*n, Compare<long>())); } } // end namespace ML
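// ---------------------------------------------------------------------------
// Editorial illustration (not part of the original test): where the reference
// values above come from. The fixture's h_res_D holds *squared* L2 distances:
// with the train inputs {1.0, 50.0, 51.0} loaded on both devices, the third
// neighbour of the query 1.0 is the point 50.0 at (50 - 1)^2 = 2401, and the
// 50.0/51.0 pair sits at distance 1. The helper names below are invented for
// this sketch and rely on the <iostream> include at the top of this file.
static float squaredL2 (float a, float b) { return (a - b) * (a - b); }

static void explainReferenceDistances ()
{
    std::cout << "d(1, 50)^2  = " << squaredL2 (1.0f, 50.0f) << std::endl;  // 2401
    std::cout << "d(50, 51)^2 = " << squaredL2 (50.0f, 51.0f) << std::endl; // 1
}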
e483c28a4f289a08077736ec74518cab09c6b0eb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <fstream> #include "SocialForceGPU2.h" #include <omp.h> __global__ void testFunc() { } namespace NeighborModule { __device__ int zcode(int x, int y) { //return x * NUM_CELL + y; x &= 0x0000ffff; // x = ---- ---- ---- ---- fedc ba98 7654 3210 y &= 0x0000ffff; // x = ---- ---- ---- ---- fedc ba98 7654 3210 x = (x ^ (x << 8)) & 0x00ff00ff; // x = ---- ---- fedc ba98 ---- ---- 7654 3210 y = (y ^ (y << 8)) & 0x00ff00ff; // x = ---- ---- fedc ba98 ---- ---- 7654 3210 y = (y ^ (y << 4)) & 0x0f0f0f0f; // x = ---- fedc ---- ba98 ---- 7654 ---- 3210 x = (x ^ (x << 4)) & 0x0f0f0f0f; // x = ---- fedc ---- ba98 ---- 7654 ---- 3210 y = (y ^ (y << 2)) & 0x33333333; // x = --fe --dc --ba --98 --76 --54 --32 --10 x = (x ^ (x << 2)) & 0x33333333; // x = --fe --dc --ba --98 --76 --54 --32 --10 y = (y ^ (y << 1)) & 0x55555555; // x = -f-e -d-c -b-a -9-8 -7-6 -5-4 -3-2 -1-0 x = (x ^ (x << 1)) & 0x55555555; // x = -f-e -d-c -b-a -9-8 -7-6 -5-4 -3-2 -1-0 return x | (y << 1); } __device__ int zcode(const double2 &loc) { int ix = loc.x / (ENV_DIM / NUM_CELL); int iy = loc.y / (ENV_DIM / NUM_CELL); return zcode(ix, iy); } __device__ int zcode(SocialForceAgent *agent) { return zcode(agent->data.loc); } __device__ void swap(SocialForceAgent** agentPtrs, int a, int b) { SocialForceAgent* temp = agentPtrs[a]; agentPtrs[a] = agentPtrs[b]; agentPtrs[b] = temp; } __device__ void quickSortByAgentLoc(SocialForceAgent** agentPtrs, hiprandState_t &rState, int l, int r) { if (l == r) return; int pi = l + hiprand(&rState) % (r - l); swap(agentPtrs, l, pi); SocialForceAgent* pivot = agentPtrs[l]; int i = l + 1, j = l + 1; for (; j < r; j++) { if (zcode(agentPtrs[j]) < zcode(pivot)) { swap(agentPtrs, i, j); i++; } } swap(agentPtrs, l, i - 1); quickSortByAgentLoc(agentPtrs, rState, l, i - 1); quickSortByAgentLoc(agentPtrs, rState, i, r); } __global__ void sortAgentByLocKernel(SocialForceAgent** agentPtrsToSort, hiprandState_t *rState, int numCap) { int idx = threadIdx.x + blockIdx.x * blockDim.x; hiprandState_t &rStateLocal = *rState; if (idx == 0) quickSortByAgentLoc(agentPtrsToSort, rStateLocal, 0, numCap); } __global__ void setCidStartEndKernel(SocialForceAgent** contextSorted, int* cidStarts, int* cidEnds, int numCap) { const int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numCap && idx > 0) { int cid = zcode(contextSorted[idx]); int cidPrev = zcode(contextSorted[idx - 1]); if (cid != cidPrev) { cidStarts[cid] = idx; cidEnds[cidPrev] = idx; } } if (idx == 0) { int cid = zcode(contextSorted[0]); cidStarts[cid] = 0; cid = zcode(contextSorted[numCap - 1]); cidEnds[cid] = numCap; } } } extern "C" void runTest() { testFunc << <32, 32 >> >(); } /* helper functions and data structures*/ #define checkCudaErrors(err) __checkCudaErrors(err, __FILE__, __LINE__) inline void __checkCudaErrors(hipError_t err, const char *file, const int line) { if (hipSuccess != err) { fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", file, line, (int)err, hipGetErrorString(err)); exit(-1); } } namespace APUtil { __global__ void hookPointerAndDataKernel(SocialForceAgent** agentPtrArray, SocialForceAgent* agentArray, int numCap) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < numCap) agentPtrArray[index] = &agentArray[index]; } }; extern "C" void hookPointerAndData(SocialForceAgent** agentPtrArray, SocialForceAgent* agentArray, int numCap) { int gSize = GRID_SIZE(numCap); APUtil::hookPointerAndDataKernel << <gSize, 
BLOCK_SIZE >> >(agentPtrArray, agentArray, numCap); } __device__ double SocialForceAgent::correctCrossBoader(double val, double limit) { if (val >= limit) return limit - 0.001; else if (val < 0) return 0; return val; } void SocialForceAgent::computeIndivSocialForceRoom(const SocialForceAgentData &myData, const SocialForceAgentData &otherData, double2 &fSum){ double cMass = 100; //my data const double2& loc = myData.loc; const double2& goal = myData.goal; const double2& velo = myData.velocity; const double& v0 = myData.v0; const double& mass = myData.mass; //other's data const double2& locOther = otherData.loc; const double2& goalOther = otherData.goal; const double2& veloOther = otherData.velocity; const double& v0Other = otherData.v0; const double& massOther = otherData.mass; double d = 1e-15 + sqrt((loc.x - locOther.x) * (loc.x - locOther.x) + (loc.y - locOther.y) * (loc.y - locOther.y)); double dDelta = mass / cMass + massOther / cMass - d; double fExp = A * exp(dDelta / B); double fKg = dDelta < 0 ? 0 : k1 *dDelta; double nijx = (loc.x - locOther.x) / d; double nijy = (loc.y - locOther.y) / d; double fnijx = (fExp + fKg) * nijx; double fnijy = (fExp + fKg) * nijy; double fkgx = 0; double fkgy = 0; if (dDelta > 0) { double tix = -nijy; double tiy = nijx; fkgx = k2 * dDelta; fkgy = k2 * dDelta; double vijDelta = (veloOther.x - velo.x) * tix + (veloOther.y - velo.y) * tiy; fkgx = fkgx * vijDelta * tix; fkgy = fkgy * vijDelta * tiy; } fSum.x += fnijx + fkgx; fSum.y += fnijy + fkgy; } __device__ void SocialForceAgent::computeForceWithWall(const SocialForceAgentData &dataLocal, obstacleLine &wall, const int &cMass, double2 &fSum) { double2 wl = make_double2(wall.ex - wall.sx, wall.ey - wall.sy); if (length(wl) == 0) return; double diw, crx, cry; const double2 &loc = dataLocal.loc; diw = wall.pointToLineDist(loc, crx, cry); double virDiw = DIST(loc.x, loc.y, crx, cry); if (virDiw == 0) return; double niwx = (loc.x - crx) / virDiw; double niwy = (loc.y - cry) / virDiw; double drw = dataLocal.mass / cMass - diw; double fiw1 = A * exp(drw / B); if (drw > 0) fiw1 += k1 * drw; double fniwx = fiw1 * niwx; double fniwy = fiw1 * niwy; double fiwKgx = 0, fiwKgy = 0; if (drw > 0) { double fiwKg = k2 * drw * (dataLocal.velocity.x * (-niwy) + dataLocal.velocity.y * niwx); fiwKgx = fiwKg * (-niwy); fiwKgy = fiwKg * niwx; } fSum.x += fniwx - fiwKgx; fSum.y += fniwy - fiwKgy; } __device__ void SocialForceAgent::computeWallImpaction(const SocialForceAgentData &dataLocal, obstacleLine &wall, const double2 &newVelo, const double &tick, double &mint){ double crx, cry, tt; const double2 &loc = dataLocal.loc; int ret = wall.intersection2LineSeg( loc.x, loc.y, loc.x + 0.5 * newVelo.x * tick, loc.y + 0.5 * newVelo.y * tick, crx, cry ); if (ret == 1) { if (fabs(crx - loc.x) > 0) tt = (crx - loc.x) / (newVelo.x * tick); else tt = (crx - loc.y) / (newVelo.y * tick + 1e-20); if (tt < mint) mint = tt; } } __device__ void SocialForceAgent::computeDirection(const SocialForceAgentData &dataLocal, double2 &dvt) { //my data const double2& loc = dataLocal.loc; const double2& goal = dataLocal.goal; const double2& velo = dataLocal.velocity; const double& v0 = dataLocal.v0; const double& mass = dataLocal.mass; dvt.x = 0; dvt.y = 0; double2 diff; diff.x = 0; diff.y = 0; double d0 = sqrt((loc.x - goal.x) * (loc.x - goal.x) + (loc.y - goal.y) * (loc.y - goal.y)); diff.x = v0 * (goal.x - loc.x) / d0; diff.y = v0 * (goal.y - loc.y) / d0; dvt.x = (diff.x - velo.x) / tao; dvt.y = (diff.y - velo.y) / tao; } __device__ int 
sharedMinAndMax(int value, bool minFlag) { for (int i = 16; i >= 1; i /= 2) { if (minFlag) value = min(value, __shfl_xor(value, i, 32)); else value = max(value, __shfl_xor(value, i, 32)); } return value; } __device__ void SocialForceAgent::computeSocialForceRoom(SocialForceAgentData &dataLocal, double2 &fSum) { //__shared__ SocialForceAgentData sdata[BLOCK_SIZE]; fSum.x = 0; fSum.y = 0; double ds = 0; int neighborCount = 0; int wid = threadIdx.x >> 5; int lane = threadIdx.x & 31; int cidStart = 0; int cidEnd = NUM_CAP; //while (cidStart < cidEnd) { //if (cidStart + threadIdx.x < cidEnd) { // SocialForceAgent *other = myClone->context[cidStart + threadIdx.x]; // sdata[threadIdx.x] = other->data; //} //int iterCount = cidEnd - cidStart > BLOCK_SIZE ? BLOCK_SIZE : cidEnd - cidStart; for (int i = 0; i < NUM_CAP; i++) { SocialForceAgentData otherData = myClone->context[i]->data; ds = length(otherData.loc - dataLocal.loc); if (ds < 6 && ds > 0) { neighborCount++; computeIndivSocialForceRoom(dataLocal, otherData, fSum); } } // cidStart += BLOCK_SIZE; //} /* for (int i = 0; i < NUM_CAP; i++) { SocialForceAgent *other = myClone->context[i]; SocialForceAgentData otherData = other->data; ds = length(otherData.loc - dataLocal.loc); if (ds < 6 && ds > 0) { neighborCount++; computeIndivSocialForceRoom(dataLocal, otherData, fSum); } } */ dataLocal.numNeighbor = neighborCount; } __device__ void SocialForceAgent::chooseNewGoal(const double2 &newLoc, double epsilon, double2 &newGoal) { double2 oldGoal = newGoal; double2 center = make_double2(ENV_DIM / 2, ENV_DIM / 2); if (newLoc.x < center.x && newLoc.y <= center.y) { newGoal.x = 0.5 * ENV_DIM; newGoal.y = 0.3 * ENV_DIM; } else if (newLoc.x <= center.x && newLoc.y > center.y) { newGoal.x = 0.3 * ENV_DIM; newGoal.y = 0.5 * ENV_DIM; } else if (newLoc.x > center.x && newLoc.y > center.y) { newGoal.x = 0.5 * ENV_DIM; newGoal.y = 0.7 * ENV_DIM; } else if (newLoc.x >= center.x && newLoc.y < center.y){ newGoal.x = 0.9 * ENV_DIM; newGoal.y = 0.3 * ENV_DIM; } } __device__ void SocialForceAgent::step(){ double cMass = 100; const double2& loc = data.loc; const double2& goal = data.goal; const double2& velo = data.velocity; const double& v0 = data.v0; const double& mass = data.mass; //compute the direction double2 dvt; computeDirection(data, dvt); //compute force with other agents double2 fSum; computeSocialForceRoom(data, fSum); //compute force with walls and gates for (int i = 0; i < NUM_WALLS; i++) { obstacleLine wall = myClone->walls[i]; computeForceWithWall(data, wall, cMass, fSum); } //sum up dvt.x += fSum.x / mass; dvt.y += fSum.y / mass; double2 newVelo = data.velocity; double2 newLoc = data.loc; double2 newGoal = data.goal; double tick = 0.1; newVelo.x += dvt.x * tick * (1);// + this->random->gaussian() * 0.1); newVelo.y += dvt.y * tick * (1);// + this->random->gaussian() * 0.1); double dv = sqrt(newVelo.x * newVelo.x + newVelo.y * newVelo.y); if (dv > maxv) { newVelo.x = newVelo.x * maxv / dv; newVelo.y = newVelo.y * maxv / dv; } double mint = 1; for (int i = 0; i < NUM_WALLS; i++) { obstacleLine wall = myClone->walls[i]; computeWallImpaction(data, wall, newVelo, tick, mint); } newVelo.x *= mint; newVelo.y *= mint; newLoc.x += newVelo.x * tick; newLoc.y += newVelo.y * tick; double goalTemp = goal.x; chooseNewGoal(newLoc, mass / cMass, newGoal); newLoc.x = correctCrossBoader(newLoc.x, ENV_DIM); newLoc.y = correctCrossBoader(newLoc.y, ENV_DIM); dataCopy = data; dataCopy.loc = newLoc; dataCopy.velocity = newVelo; dataCopy.goal = newGoal; } __device__ void 
SocialForceAgent::init(SocialForceClone* c, int idx) { this->contextId = idx; //this->myOrigin = NULL; this->goalIdx = 0; this->myClone = c; hiprandState_t rStateLocal = c->rState[idx]; this->color.x = hiprand(&rStateLocal) % 256; this->color.y = hiprand(&rStateLocal) % 256; this->color.z = hiprand(&rStateLocal) % 256; this->color.w = hiprand(&rStateLocal) % 256; SocialForceAgentData & dataLocal = this->data; //= &sfModel->originalAgents->dataArray[dataSlot]; float rx = (float)(idx / 32) / (float)32; float ry = (float)(idx % 32) / (float)32; dataLocal.loc.x = (0.6 + 0.1 * hiprand_uniform(&rStateLocal)) * ENV_DIM; dataLocal.loc.y = (0.5 + 0.4 * hiprand_uniform(&rStateLocal)) * ENV_DIM; dataLocal.velocity.x = 2;//4 * (this->random->uniform()-0.5); dataLocal.velocity.y = 2;//4 * (this->random->uniform()-0.5); dataLocal.v0 = 2; dataLocal.mass = 50; dataLocal.numNeighbor = 0; //chooseNewGoal(dataLocal.loc, 0, dataLocal.goal); dataLocal.goal = make_double2(0.5 * ENV_DIM, 0.7 * ENV_DIM); this->dataCopy = dataLocal; } __device__ void SocialForceAgent::initNewClone(SocialForceAgent *parent, SocialForceClone *childClone) { this->color = childClone->color; this->contextId = parent->contextId; //this->myOrigin = parent; this->myClone = childClone; this->goalIdx = parent->goalIdx; for (int i = 0; i < NUM_GOAL; i++) this->goalSeq[i] = parent->goalSeq[i]; this->data = parent->data; this->dataCopy = parent->dataCopy; } namespace clone { __global__ void stepKernel(SocialForceClone *c, int numElemLocal) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < numElemLocal) c->ap->agentPtrArray[index]->step(); } __global__ void swapKernel(SocialForceClone *c, int numElemLocal) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numElemLocal) { SocialForceAgent &agent = *c->ap->agentPtrArray[idx]; agent.data = agent.dataCopy; } } } void SocialForceClone::step(int stepCount) { if (numElem == 0) return; int gSize; //alterGate(stepCount); /* hipMemcpyAsync(contextSorted, context, sizeof(SocialForceAgent*) * NUM_CAP, hipMemcpyDeviceToDevice, myStream); hipStreamSynchronize(myStream); NeighborModule::sortAgentByLocKernel << <1, 1, 0, myStream >> >(this->contextSorted, this->rState, NUM_CAP); hipMemsetAsync(cidStarts, 0xff, sizeof(int) * NUM_CELL * NUM_CELL, myStream); hipMemsetAsync(cidEnds, 0xff, sizeof(int) * NUM_CELL * NUM_CELL, myStream); hipStreamSynchronize(myStream); gSize = GRID_SIZE(NUM_CAP); NeighborModule::setCidStartEndKernel<<<gSize, BLOCK_SIZE, 0, myStream>>>(contextSorted, cidStarts, cidEnds, NUM_CAP); NeighborModule::sortAgentByLocKernel << <1, 1, 0, myStream >> >(this->apHost->agentPtrArray, this->rState, this->numElem); */ gSize = GRID_SIZE(numElem); size_t smemSize = sizeof(SocialForceAgentData) * BLOCK_SIZE; clone::stepKernel << <gSize, BLOCK_SIZE, smemSize, myStream >> >(selfDev, numElem); //clone::stepKernel << <gSize, BLOCK_SIZE >> >(selfDev, numElem); } void SocialForceClone::swap() { if (numElem == 0) return; int gSize = GRID_SIZE(numElem); clone::swapKernel << <gSize, BLOCK_SIZE >> >(selfDev, numElem); } void SocialForceClone::alterGate(int stepCount) { bool changed = false; for (int i = 0; i < NUM_PARAM; i++) { if (cloneParams[i] == stepCount) { changed = true; gates[i].init(0, 0, 0, 0); //hipMemcpyAsync(&selfDev->gates[i], &gates[i], sizeof(obstacleLine), hipMemcpyHostToDevice, myStream); hipMemcpy(&selfDev->gates[i], &gates[i], sizeof(obstacleLine), hipMemcpyHostToDevice); } } } namespace AppUtil { __device__ bool cloningCondition(SocialForceAgent *agent, 
SocialForceClone *parentClone, SocialForceClone *childClone) { // if agent has been cloned? if (childClone->cloneFlags[agent->contextId] == true) return false; // active cloning condition double2 &loc = agent->data.loc; for (int i = 0; i < NUM_PARAM; i++) { int param1 = parentClone->cloneParams[i]; int param2 = childClone->cloneParams[i]; if (param1 != param2) { obstacleLine g1 = parentClone->gates[i]; obstacleLine g2 = childClone->gates[i]; if (g1.pointToLineDist(loc) < 6) return true; if (g2.pointToLineDist(loc) < 6) return true; } } // passive cloning condition #define MY_MAX(a, b) (a > b ? a : b) #define MY_MIN(a, b) (a < b ? a : b) int minx = MY_MAX((loc.x - RADIUS_I) / CELL_DIM, 0); int miny = MY_MAX((loc.y - RADIUS_I) / CELL_DIM, 0); int maxx = MY_MIN((loc.x + RADIUS_I) / CELL_DIM, NUM_CELL - 1); int maxy = MY_MIN((loc.y + RADIUS_I) / CELL_DIM, NUM_CELL - 1); for (int i = minx; i <= maxx; i++) for (int j = miny; j <= maxy; j++) if (childClone->takenMap[i * NUM_CELL + j]) return true; // pass all the check, don't need to be cloned return false; } __global__ void updateContextKernel(SocialForceClone *c, int numElem) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numElem) { SocialForceAgent *agent = c->ap->agentPtrArray[idx]; c->context[agent->contextId] = agent; } } __global__ void constructPassiveMap(SocialForceClone *c, int numElem) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numElem) { SocialForceAgent &agent = *c->ap->agentPtrArray[idx]; int takenId = agent.data.loc.x / CELL_DIM; takenId = takenId * NUM_CELL + agent.data.loc.y / CELL_DIM; c->takenMap[takenId] = true; } } __global__ void performCloningKernel(SocialForceClone *p, SocialForceClone *c, int numCap) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < p->numElem) { SocialForceAgent *agent = p->ap->agentPtrArray[idx]; if (cloningCondition(agent, p, c)) { uint lastNum = atomicInc(&c->numElem, numCap); SocialForceAgent& childAgent = *c->ap->agentPtrArray[lastNum]; c->ap->takenFlags[lastNum] = true; childAgent.initNewClone(agent, c); c->context[childAgent.contextId] = &childAgent; c->cloneFlags[childAgent.contextId] = true; //c->numElem++; /* not written back */ } } } __global__ void compareAndEliminateKernel(SocialForceClone *p, SocialForceClone *c, int numElem) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numElem) { SocialForceAgent &childAgent = *c->ap->agentPtrArray[idx]; SocialForceAgent &parentAgent = *p->context[childAgent.contextId]; // *(SocialForceAgent*)childAgent.myOrigin; double velDiff = length(childAgent.dataCopy.velocity - parentAgent.dataCopy.velocity); double locDiff = length(childAgent.dataCopy.loc - parentAgent.dataCopy.loc); if (locDiff == 0 && velDiff == 0) { c->ap->takenFlags[idx] = false; c->cloneFlags[childAgent.contextId] = false; } } } template<class T> __device__ void swap(T * ar, int a, int b) { T t1 = ar[a]; ar[a] = ar[b]; ar[b] = t1; } __global__ void reorderKernel(SocialForceClone *c, int numElem) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx == 0) { int l = 0; int r = numElem; int i = l, j = l; for (; j < r; j++) { if (c->ap->takenFlags[j] == true) { swap<SocialForceAgent*>(c->ap->agentPtrArray, i, j); swap<bool>(c->ap->takenFlags, i, j); i++; } } c->numElem = i; } } }; void SocialForceSimApp::performClone(SocialForceClone *parentClone, SocialForceClone *childClone) { childClone->parentCloneid = parentClone->cloneid; // 1. 
copy the context of parent clone hipMemcpyAsync(childClone->context, parentClone->context, NUM_CAP * sizeof(SocialForceAgent*), hipMemcpyDeviceToDevice, childClone->myStream); hipStreamSynchronize(childClone->myStream); //hipMemcpy(childClone->context, parentClone->context, NUM_CAP * sizeof(SocialForceAgent*), hipMemcpyDeviceToDevice); getLastCudaError("perform clone"); // 2. update the context with agents of its own if (childClone->numElem > 0) { int gSize = GRID_SIZE(childClone->numElem); AppUtil::updateContextKernel << <gSize, BLOCK_SIZE, 0, childClone->myStream >> >(childClone->selfDev, childClone->numElem); //AppUtil::updateContextKernel << <gSize, BLOCK_SIZE >> >(childClone->selfDev, childClone->numElem); } getLastCudaError("perform clone"); // 3. construct passive cloning map if (childClone->numElem > 0) { hipMemsetAsync(childClone->selfDev->takenMap, 0, sizeof(bool) * NUM_CELL * NUM_CELL, childClone->myStream); hipStreamSynchronize(childClone->myStream); //hipMemset(childClone->selfDev->takenMap, 0, sizeof(bool) * NUM_CELL * NUM_CELL); int gSize = GRID_SIZE(childClone->numElem); AppUtil::constructPassiveMap << <gSize, BLOCK_SIZE, 0, childClone->myStream >> >(childClone->selfDev, childClone->numElem); //AppUtil::constructPassiveMap << <gSize, BLOCK_SIZE >> >(childClone->selfDev, childClone->numElem); } getLastCudaError("perform clone"); // 4. perform active and passive cloning (in cloningCondition checking) int gSize = GRID_SIZE(NUM_CAP); //AppUtil::performCloningKernel << <gSize, BLOCK_SIZE >> >(parentClone->selfDev, childClone->selfDev, NUM_CAP); AppUtil::performCloningKernel << <gSize, BLOCK_SIZE, 0, childClone->myStream >> >(parentClone->selfDev, childClone->selfDev, NUM_CAP); hipMemcpyAsync(childClone, childClone->selfDev, sizeof(SocialForceClone), hipMemcpyDeviceToHost, childClone->myStream); hipStreamSynchronize(childClone->myStream); getLastCudaError("perform clone"); } void compareAndEliminateCPU(SocialForceClone *parentClone, SocialForceClone *childClone) { wchar_t message[20]; for (int i = 0; i < childClone->numElem; i++) { SocialForceAgent &childAgent = *childClone->ap->agentPtrArray[i]; SocialForceAgent parentAgent; // *(SocialForceAgent*)childAgent.myOrigin; if (length(childAgent.dataCopy.velocity - parentAgent.dataCopy.velocity) == 0 && length(childAgent.dataCopy.loc - parentAgent.dataCopy.loc) == 0) { childClone->ap->takenFlags[i] = false; childClone->cloneFlags[childAgent.contextId] = false; } /*else { if (childClone->cloneid == 4) { swprintf_s(message, 20, L"not false: %d\n", i); OutputDebugString(message); } }*/ } childClone->numElem = childClone->ap->reorder(childClone->numElem); } void SocialForceSimApp::compareAndEliminate(SocialForceClone *parentClone, SocialForceClone *childClone) { if (childClone->numElem == 0) return; int gSize = GRID_SIZE(childClone->numElem); AppUtil::compareAndEliminateKernel << <gSize, BLOCK_SIZE, 0, childClone->myStream >> >(parentClone->selfDev, childClone->selfDev, childClone->numElem); //AppUtil::compareAndEliminateKernel << <gSize, BLOCK_SIZE>> >(parentClone->selfDev, childClone->selfDev, childClone->numElem); gSize = GRID_SIZE(NUM_CAP); AppUtil::reorderKernel << <1, 1, 0, childClone->myStream >> >(childClone->selfDev, childClone->numElem); //AppUtil::reorderKernel << <1, 1 >> >(childClone->selfDev, childClone->numElem); hipMemcpyAsync(childClone, childClone->selfDev, sizeof(SocialForceClone), hipMemcpyDeviceToHost, childClone->myStream); hipStreamSynchronize(childClone->myStream); } void SocialForceSimApp::proc(int p, int c, bool 
o, char *s) { performClone(cAll[p], cAll[c]); cAll[c]->step(stepCount); if (o) { if (stepCount < 800) cAll[c]->output(stepCount, s); } compareAndEliminate(cAll[p], cAll[c]); } void swap(int **cloneTree, int a, int b) { int t1 = cloneTree[0][a]; cloneTree[0][a] = cloneTree[0][b]; cloneTree[0][b] = t1; t1 = cloneTree[1][a]; cloneTree[1][a] = cloneTree[1][b]; cloneTree[1][b] = t1; } void quickSort(int **cloneTree, int l, int r) { if (l == r) return; int pi = l + rand() % (r - l); swap(cloneTree, l, pi); int pivot = cloneTree[0][l]; int i = l + 1, j = l + 1; for (; j < r; j++) { if (cloneTree[0][j] < pivot) { swap(cloneTree, i, j); i++; } } swap(cloneTree, l, i - 1); quickSort(cloneTree, l, i - 1); quickSort(cloneTree, i, r); } void SocialForceSimApp::mst() { // clone diff matrix int **cloneDiff = new int*[totalClone]; for (int i = 0; i < totalClone; i++) { cloneDiff[i] = new int[totalClone]; for (int j = 0; j < totalClone; j++) cloneDiff[i][j] = 0; } for (int i = 0; i < totalClone; i++) { for (int j = 0; j < totalClone; j++) { for (int k = 0; k < NUM_PARAM; k++) { if (cAll[i]->cloneParams[k] != cAll[j]->cloneParams[k]) cloneDiff[i][j]++; } wchar_t message[20]; swprintf_s(message, 20, L"%d ", cloneDiff[i][j]); OutputDebugString(message); } OutputDebugString(L"\n"); } int *parent = cloneTree[0] = new int[totalClone]; int *child = cloneTree[1] = new int[totalClone]; int *key = new int[totalClone]; bool *mstSet = new bool[totalClone]; for (int i = 0; i < totalClone; i++) child[i] = i, key[i] = INT_MAX, mstSet[i] = false; key[0] = 0; parent[0] = -1; child[0] = 0; int count = 0; while (count++ < totalClone - 1) { int minKey = INT_MAX; int minIdx; for (int j = 0; j < totalClone; j++) if (mstSet[j] == false && key[j] < minKey) minKey = key[j], minIdx = j; mstSet[minIdx] = true; for (int j = 0; j < totalClone; j++) if (cloneDiff[minIdx][j] && mstSet[j] == false && cloneDiff[minIdx][j] < key[j]) parent[j] = minIdx, key[j] = cloneDiff[minIdx][j]; } quickSort(cloneTree, 0, totalClone); for (int i = 1; i < totalClone; i++) { wchar_t message[20]; swprintf_s(message, 20, L"%d - %d: %d\n", cloneTree[0][i], cloneTree[1][i], cloneDiff[i][parent[i]]); OutputDebugString(message); } delete mstSet; delete key; } __global__ void getLocAndColorKernel(SocialForceClone *c, double2 *loc, uchar4 *color, int *contextId, int numElem) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numElem) { loc[idx] = c->context[idx]->data.loc; color[idx] = c->context[idx]->color; contextId[idx] = c->context[idx]->contextId; } } void SocialForceSimApp::getLocAndColorFromDevice(){ SocialForceClone *c = cAll[paintId]; int gSize = GRID_SIZE(NUM_CAP); getLocAndColorKernel << <gSize, BLOCK_SIZE >> >(c->selfDev, debugLocDev, debugColorDev, debugContextIdDev, NUM_CAP); hipMemcpy(debugLocHost, debugLocDev, sizeof(double2) * NUM_CAP, hipMemcpyDeviceToHost); hipMemcpy(debugColorHost, debugColorDev, sizeof(uchar4) * NUM_CAP, hipMemcpyDeviceToHost); hipMemcpy(debugContextIdHost, debugContextIdDev, sizeof(int) * NUM_CAP, hipMemcpyDeviceToHost); hipMemcpy(c->takenMap, c->selfDev->takenMap, sizeof(bool) * NUM_CELL * NUM_CELL, hipMemcpyDeviceToHost); //hipMemcpy(debugCidStartsHost, c->cidStarts, sizeof(int) * NUM_CELL * NUM_CELL, hipMemcpyDeviceToHost); //hipMemcpy(debugCidEndsHost, c->cidEnds, sizeof(int) * NUM_CELL * NUM_CELL, hipMemcpyDeviceToHost); //wchar_t message[128]; //for (int i = 0; i < NUM_CELL * NUM_CELL; i++) { // swprintf_s(message, L"(%d, %d) ", debugCidStartsHost[i], debugCidEndsHost[i]); // OutputDebugString(message); //} 
//OutputDebugString(L"\n"); } __global__ void initRandomKernel(SocialForceClone* c, int numElemLocal) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numElemLocal) { hiprand_init(1234, idx, 0, &c->rState[idx]); } } __global__ void initRootCloneKernel(SocialForceClone* c, int numElemLocal) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numElemLocal) { c->ap->agentArray[idx].init(c, idx); c->context[idx] = &c->ap->agentArray[idx]; c->cloneFlags[idx] = false; } if (idx == 0) c->numElem = numElemLocal; } void SocialForceSimApp::initRootClone(SocialForceClone* cHost, SocialForceClone* cDev) { cHost->numElem = NUM_CAP; int gSize = GRID_SIZE(NUM_CAP); initRandomKernel << <gSize, BLOCK_SIZE >> >(cDev, NUM_CAP); initRootCloneKernel << <gSize, BLOCK_SIZE >> >(cDev, NUM_CAP); }
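// ---------------------------------------------------------------------------
// Editorial illustration (not part of the original source): the magic-constant
// zcode() in NeighborModule above is a standard bit-interleaving (Morton /
// Z-order) code, used so that agents falling in the same or nearby grid cells
// end up adjacent after sorting. The loop below is a plain reference
// implementation of the same interleave, added only as an illustration;
// mortonReference is an invented name and is not used by the simulation.
static int mortonReference (int x, int y)
{
    int code = 0;
    for (int b = 0; b < 16; b++) {
        code |= ((x >> b) & 1) << (2 * b);     // x bits land on even positions
        code |= ((y >> b) & 1) << (2 * b + 1); // y bits land on odd positions
    }
    return code;
}
// Example: mortonReference(3, 5) interleaves x = 011 and y = 101 into
// y2 x2 y1 x1 y0 x0 = 100111 = 39, matching zcode(3, 5).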
e483c28a4f289a08077736ec74518cab09c6b0eb.cu
#include "cuda_runtime.h" #include <fstream> #include "SocialForceGPU2.h" #include <omp.h> __global__ void testFunc() { } namespace NeighborModule { __device__ int zcode(int x, int y) { //return x * NUM_CELL + y; x &= 0x0000ffff; // x = ---- ---- ---- ---- fedc ba98 7654 3210 y &= 0x0000ffff; // x = ---- ---- ---- ---- fedc ba98 7654 3210 x = (x ^ (x << 8)) & 0x00ff00ff; // x = ---- ---- fedc ba98 ---- ---- 7654 3210 y = (y ^ (y << 8)) & 0x00ff00ff; // x = ---- ---- fedc ba98 ---- ---- 7654 3210 y = (y ^ (y << 4)) & 0x0f0f0f0f; // x = ---- fedc ---- ba98 ---- 7654 ---- 3210 x = (x ^ (x << 4)) & 0x0f0f0f0f; // x = ---- fedc ---- ba98 ---- 7654 ---- 3210 y = (y ^ (y << 2)) & 0x33333333; // x = --fe --dc --ba --98 --76 --54 --32 --10 x = (x ^ (x << 2)) & 0x33333333; // x = --fe --dc --ba --98 --76 --54 --32 --10 y = (y ^ (y << 1)) & 0x55555555; // x = -f-e -d-c -b-a -9-8 -7-6 -5-4 -3-2 -1-0 x = (x ^ (x << 1)) & 0x55555555; // x = -f-e -d-c -b-a -9-8 -7-6 -5-4 -3-2 -1-0 return x | (y << 1); } __device__ int zcode(const double2 &loc) { int ix = loc.x / (ENV_DIM / NUM_CELL); int iy = loc.y / (ENV_DIM / NUM_CELL); return zcode(ix, iy); } __device__ int zcode(SocialForceAgent *agent) { return zcode(agent->data.loc); } __device__ void swap(SocialForceAgent** agentPtrs, int a, int b) { SocialForceAgent* temp = agentPtrs[a]; agentPtrs[a] = agentPtrs[b]; agentPtrs[b] = temp; } __device__ void quickSortByAgentLoc(SocialForceAgent** agentPtrs, curandState &rState, int l, int r) { if (l == r) return; int pi = l + curand(&rState) % (r - l); swap(agentPtrs, l, pi); SocialForceAgent* pivot = agentPtrs[l]; int i = l + 1, j = l + 1; for (; j < r; j++) { if (zcode(agentPtrs[j]) < zcode(pivot)) { swap(agentPtrs, i, j); i++; } } swap(agentPtrs, l, i - 1); quickSortByAgentLoc(agentPtrs, rState, l, i - 1); quickSortByAgentLoc(agentPtrs, rState, i, r); } __global__ void sortAgentByLocKernel(SocialForceAgent** agentPtrsToSort, curandState *rState, int numCap) { int idx = threadIdx.x + blockIdx.x * blockDim.x; curandState &rStateLocal = *rState; if (idx == 0) quickSortByAgentLoc(agentPtrsToSort, rStateLocal, 0, numCap); } __global__ void setCidStartEndKernel(SocialForceAgent** contextSorted, int* cidStarts, int* cidEnds, int numCap) { const int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numCap && idx > 0) { int cid = zcode(contextSorted[idx]); int cidPrev = zcode(contextSorted[idx - 1]); if (cid != cidPrev) { cidStarts[cid] = idx; cidEnds[cidPrev] = idx; } } if (idx == 0) { int cid = zcode(contextSorted[0]); cidStarts[cid] = 0; cid = zcode(contextSorted[numCap - 1]); cidEnds[cid] = numCap; } } } extern "C" void runTest() { testFunc << <32, 32 >> >(); } /* helper functions and data structures*/ #define checkCudaErrors(err) __checkCudaErrors(err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line) { if (cudaSuccess != err) { fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", file, line, (int)err, cudaGetErrorString(err)); exit(-1); } } namespace APUtil { __global__ void hookPointerAndDataKernel(SocialForceAgent** agentPtrArray, SocialForceAgent* agentArray, int numCap) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < numCap) agentPtrArray[index] = &agentArray[index]; } }; extern "C" void hookPointerAndData(SocialForceAgent** agentPtrArray, SocialForceAgent* agentArray, int numCap) { int gSize = GRID_SIZE(numCap); APUtil::hookPointerAndDataKernel << <gSize, BLOCK_SIZE >> >(agentPtrArray, agentArray, numCap); } __device__ double 
SocialForceAgent::correctCrossBoader(double val, double limit) { if (val >= limit) return limit - 0.001; else if (val < 0) return 0; return val; } void SocialForceAgent::computeIndivSocialForceRoom(const SocialForceAgentData &myData, const SocialForceAgentData &otherData, double2 &fSum){ double cMass = 100; //my data const double2& loc = myData.loc; const double2& goal = myData.goal; const double2& velo = myData.velocity; const double& v0 = myData.v0; const double& mass = myData.mass; //other's data const double2& locOther = otherData.loc; const double2& goalOther = otherData.goal; const double2& veloOther = otherData.velocity; const double& v0Other = otherData.v0; const double& massOther = otherData.mass; double d = 1e-15 + sqrt((loc.x - locOther.x) * (loc.x - locOther.x) + (loc.y - locOther.y) * (loc.y - locOther.y)); double dDelta = mass / cMass + massOther / cMass - d; double fExp = A * exp(dDelta / B); double fKg = dDelta < 0 ? 0 : k1 *dDelta; double nijx = (loc.x - locOther.x) / d; double nijy = (loc.y - locOther.y) / d; double fnijx = (fExp + fKg) * nijx; double fnijy = (fExp + fKg) * nijy; double fkgx = 0; double fkgy = 0; if (dDelta > 0) { double tix = -nijy; double tiy = nijx; fkgx = k2 * dDelta; fkgy = k2 * dDelta; double vijDelta = (veloOther.x - velo.x) * tix + (veloOther.y - velo.y) * tiy; fkgx = fkgx * vijDelta * tix; fkgy = fkgy * vijDelta * tiy; } fSum.x += fnijx + fkgx; fSum.y += fnijy + fkgy; } __device__ void SocialForceAgent::computeForceWithWall(const SocialForceAgentData &dataLocal, obstacleLine &wall, const int &cMass, double2 &fSum) { double2 wl = make_double2(wall.ex - wall.sx, wall.ey - wall.sy); if (length(wl) == 0) return; double diw, crx, cry; const double2 &loc = dataLocal.loc; diw = wall.pointToLineDist(loc, crx, cry); double virDiw = DIST(loc.x, loc.y, crx, cry); if (virDiw == 0) return; double niwx = (loc.x - crx) / virDiw; double niwy = (loc.y - cry) / virDiw; double drw = dataLocal.mass / cMass - diw; double fiw1 = A * exp(drw / B); if (drw > 0) fiw1 += k1 * drw; double fniwx = fiw1 * niwx; double fniwy = fiw1 * niwy; double fiwKgx = 0, fiwKgy = 0; if (drw > 0) { double fiwKg = k2 * drw * (dataLocal.velocity.x * (-niwy) + dataLocal.velocity.y * niwx); fiwKgx = fiwKg * (-niwy); fiwKgy = fiwKg * niwx; } fSum.x += fniwx - fiwKgx; fSum.y += fniwy - fiwKgy; } __device__ void SocialForceAgent::computeWallImpaction(const SocialForceAgentData &dataLocal, obstacleLine &wall, const double2 &newVelo, const double &tick, double &mint){ double crx, cry, tt; const double2 &loc = dataLocal.loc; int ret = wall.intersection2LineSeg( loc.x, loc.y, loc.x + 0.5 * newVelo.x * tick, loc.y + 0.5 * newVelo.y * tick, crx, cry ); if (ret == 1) { if (fabs(crx - loc.x) > 0) tt = (crx - loc.x) / (newVelo.x * tick); else tt = (crx - loc.y) / (newVelo.y * tick + 1e-20); if (tt < mint) mint = tt; } } __device__ void SocialForceAgent::computeDirection(const SocialForceAgentData &dataLocal, double2 &dvt) { //my data const double2& loc = dataLocal.loc; const double2& goal = dataLocal.goal; const double2& velo = dataLocal.velocity; const double& v0 = dataLocal.v0; const double& mass = dataLocal.mass; dvt.x = 0; dvt.y = 0; double2 diff; diff.x = 0; diff.y = 0; double d0 = sqrt((loc.x - goal.x) * (loc.x - goal.x) + (loc.y - goal.y) * (loc.y - goal.y)); diff.x = v0 * (goal.x - loc.x) / d0; diff.y = v0 * (goal.y - loc.y) / d0; dvt.x = (diff.x - velo.x) / tao; dvt.y = (diff.y - velo.y) / tao; } __device__ int sharedMinAndMax(int value, bool minFlag) { for (int i = 16; i >= 1; i /= 2) { if 
(minFlag) value = min(value, __shfl_xor(value, i, 32)); else value = max(value, __shfl_xor(value, i, 32)); } return value; } __device__ void SocialForceAgent::computeSocialForceRoom(SocialForceAgentData &dataLocal, double2 &fSum) { //__shared__ SocialForceAgentData sdata[BLOCK_SIZE]; fSum.x = 0; fSum.y = 0; double ds = 0; int neighborCount = 0; int wid = threadIdx.x >> 5; int lane = threadIdx.x & 31; int cidStart = 0; int cidEnd = NUM_CAP; //while (cidStart < cidEnd) { //if (cidStart + threadIdx.x < cidEnd) { // SocialForceAgent *other = myClone->context[cidStart + threadIdx.x]; // sdata[threadIdx.x] = other->data; //} //int iterCount = cidEnd - cidStart > BLOCK_SIZE ? BLOCK_SIZE : cidEnd - cidStart; for (int i = 0; i < NUM_CAP; i++) { SocialForceAgentData otherData = myClone->context[i]->data; ds = length(otherData.loc - dataLocal.loc); if (ds < 6 && ds > 0) { neighborCount++; computeIndivSocialForceRoom(dataLocal, otherData, fSum); } } // cidStart += BLOCK_SIZE; //} /* for (int i = 0; i < NUM_CAP; i++) { SocialForceAgent *other = myClone->context[i]; SocialForceAgentData otherData = other->data; ds = length(otherData.loc - dataLocal.loc); if (ds < 6 && ds > 0) { neighborCount++; computeIndivSocialForceRoom(dataLocal, otherData, fSum); } } */ dataLocal.numNeighbor = neighborCount; } __device__ void SocialForceAgent::chooseNewGoal(const double2 &newLoc, double epsilon, double2 &newGoal) { double2 oldGoal = newGoal; double2 center = make_double2(ENV_DIM / 2, ENV_DIM / 2); if (newLoc.x < center.x && newLoc.y <= center.y) { newGoal.x = 0.5 * ENV_DIM; newGoal.y = 0.3 * ENV_DIM; } else if (newLoc.x <= center.x && newLoc.y > center.y) { newGoal.x = 0.3 * ENV_DIM; newGoal.y = 0.5 * ENV_DIM; } else if (newLoc.x > center.x && newLoc.y > center.y) { newGoal.x = 0.5 * ENV_DIM; newGoal.y = 0.7 * ENV_DIM; } else if (newLoc.x >= center.x && newLoc.y < center.y){ newGoal.x = 0.9 * ENV_DIM; newGoal.y = 0.3 * ENV_DIM; } } __device__ void SocialForceAgent::step(){ double cMass = 100; const double2& loc = data.loc; const double2& goal = data.goal; const double2& velo = data.velocity; const double& v0 = data.v0; const double& mass = data.mass; //compute the direction double2 dvt; computeDirection(data, dvt); //compute force with other agents double2 fSum; computeSocialForceRoom(data, fSum); //compute force with walls and gates for (int i = 0; i < NUM_WALLS; i++) { obstacleLine wall = myClone->walls[i]; computeForceWithWall(data, wall, cMass, fSum); } //sum up dvt.x += fSum.x / mass; dvt.y += fSum.y / mass; double2 newVelo = data.velocity; double2 newLoc = data.loc; double2 newGoal = data.goal; double tick = 0.1; newVelo.x += dvt.x * tick * (1);// + this->random->gaussian() * 0.1); newVelo.y += dvt.y * tick * (1);// + this->random->gaussian() * 0.1); double dv = sqrt(newVelo.x * newVelo.x + newVelo.y * newVelo.y); if (dv > maxv) { newVelo.x = newVelo.x * maxv / dv; newVelo.y = newVelo.y * maxv / dv; } double mint = 1; for (int i = 0; i < NUM_WALLS; i++) { obstacleLine wall = myClone->walls[i]; computeWallImpaction(data, wall, newVelo, tick, mint); } newVelo.x *= mint; newVelo.y *= mint; newLoc.x += newVelo.x * tick; newLoc.y += newVelo.y * tick; double goalTemp = goal.x; chooseNewGoal(newLoc, mass / cMass, newGoal); newLoc.x = correctCrossBoader(newLoc.x, ENV_DIM); newLoc.y = correctCrossBoader(newLoc.y, ENV_DIM); dataCopy = data; dataCopy.loc = newLoc; dataCopy.velocity = newVelo; dataCopy.goal = newGoal; } __device__ void SocialForceAgent::init(SocialForceClone* c, int idx) { this->contextId = idx; 
    //this->myOrigin = NULL;
    this->goalIdx = 0;
    this->myClone = c;

    curandState_t rStateLocal = c->rState[idx];
    this->color.x = curand(&rStateLocal) % 256;
    this->color.y = curand(&rStateLocal) % 256;
    this->color.z = curand(&rStateLocal) % 256;
    this->color.w = curand(&rStateLocal) % 256;

    SocialForceAgentData & dataLocal = this->data; //= &sfModel->originalAgents->dataArray[dataSlot];
    float rx = (float)(idx / 32) / (float)32;
    float ry = (float)(idx % 32) / (float)32;
    dataLocal.loc.x = (0.6 + 0.1 * curand_uniform(&rStateLocal)) * ENV_DIM;
    dataLocal.loc.y = (0.5 + 0.4 * curand_uniform(&rStateLocal)) * ENV_DIM;
    dataLocal.velocity.x = 2;//4 * (this->random->uniform()-0.5);
    dataLocal.velocity.y = 2;//4 * (this->random->uniform()-0.5);
    dataLocal.v0 = 2;
    dataLocal.mass = 50;
    dataLocal.numNeighbor = 0;
    //chooseNewGoal(dataLocal.loc, 0, dataLocal.goal);
    dataLocal.goal = make_double2(0.5 * ENV_DIM, 0.7 * ENV_DIM);
    this->dataCopy = dataLocal;
}

__device__ void SocialForceAgent::initNewClone(SocialForceAgent *parent, SocialForceClone *childClone) {
    this->color = childClone->color;
    this->contextId = parent->contextId;
    //this->myOrigin = parent;
    this->myClone = childClone;
    this->goalIdx = parent->goalIdx;
    for (int i = 0; i < NUM_GOAL; i++)
        this->goalSeq[i] = parent->goalSeq[i];
    this->data = parent->data;
    this->dataCopy = parent->dataCopy;
}

namespace clone {
    __global__ void stepKernel(SocialForceClone *c, int numElemLocal) {
        int index = threadIdx.x + blockIdx.x * blockDim.x;
        if (index < numElemLocal)
            c->ap->agentPtrArray[index]->step();
    }
    __global__ void swapKernel(SocialForceClone *c, int numElemLocal) {
        int idx = threadIdx.x + blockIdx.x * blockDim.x;
        if (idx < numElemLocal) {
            SocialForceAgent &agent = *c->ap->agentPtrArray[idx];
            agent.data = agent.dataCopy;
        }
    }
}

void SocialForceClone::step(int stepCount) {
    if (numElem == 0) return;
    int gSize;
    //alterGate(stepCount);

    /*
    cudaMemcpyAsync(contextSorted, context, sizeof(SocialForceAgent*) * NUM_CAP, cudaMemcpyDeviceToDevice, myStream);
    cudaStreamSynchronize(myStream);
    NeighborModule::sortAgentByLocKernel << <1, 1, 0, myStream >> >(this->contextSorted, this->rState, NUM_CAP);

    cudaMemsetAsync(cidStarts, 0xff, sizeof(int) * NUM_CELL * NUM_CELL, myStream);
    cudaMemsetAsync(cidEnds, 0xff, sizeof(int) * NUM_CELL * NUM_CELL, myStream);
    cudaStreamSynchronize(myStream);

    gSize = GRID_SIZE(NUM_CAP);
    NeighborModule::setCidStartEndKernel<<<gSize, BLOCK_SIZE, 0, myStream>>>(contextSorted, cidStarts, cidEnds, NUM_CAP);

    NeighborModule::sortAgentByLocKernel << <1, 1, 0, myStream >> >(this->apHost->agentPtrArray, this->rState, this->numElem);
    */

    gSize = GRID_SIZE(numElem);
    size_t smemSize = sizeof(SocialForceAgentData) * BLOCK_SIZE;
    clone::stepKernel << <gSize, BLOCK_SIZE, smemSize, myStream >> >(selfDev, numElem);
    //clone::stepKernel << <gSize, BLOCK_SIZE >> >(selfDev, numElem);
}

void SocialForceClone::swap() {
    if (numElem == 0) return;
    int gSize = GRID_SIZE(numElem);
    clone::swapKernel << <gSize, BLOCK_SIZE >> >(selfDev, numElem);
}

void SocialForceClone::alterGate(int stepCount) {
    bool changed = false;
    for (int i = 0; i < NUM_PARAM; i++) {
        if (cloneParams[i] == stepCount) {
            changed = true;
            gates[i].init(0, 0, 0, 0);
            //cudaMemcpyAsync(&selfDev->gates[i], &gates[i], sizeof(obstacleLine), cudaMemcpyHostToDevice, myStream);
            cudaMemcpy(&selfDev->gates[i], &gates[i], sizeof(obstacleLine), cudaMemcpyHostToDevice);
        }
    }
}

namespace AppUtil {

    __device__ bool cloningCondition(SocialForceAgent *agent, SocialForceClone *parentClone, SocialForceClone *childClone) {

        // if agent has been cloned?
        if (childClone->cloneFlags[agent->contextId] == true)
            return false;

        // active cloning condition
        double2 &loc = agent->data.loc;
        for (int i = 0; i < NUM_PARAM; i++) {
            int param1 = parentClone->cloneParams[i];
            int param2 = childClone->cloneParams[i];
            if (param1 != param2) {
                obstacleLine g1 = parentClone->gates[i];
                obstacleLine g2 = childClone->gates[i];
                if (g1.pointToLineDist(loc) < 6)
                    return true;
                if (g2.pointToLineDist(loc) < 6)
                    return true;
            }
        }

        // passive cloning condition
#define MY_MAX(a, b) (a > b ? a : b)
#define MY_MIN(a, b) (a < b ? a : b)
        int minx = MY_MAX((loc.x - RADIUS_I) / CELL_DIM, 0);
        int miny = MY_MAX((loc.y - RADIUS_I) / CELL_DIM, 0);
        int maxx = MY_MIN((loc.x + RADIUS_I) / CELL_DIM, NUM_CELL - 1);
        int maxy = MY_MIN((loc.y + RADIUS_I) / CELL_DIM, NUM_CELL - 1);
        for (int i = minx; i <= maxx; i++)
            for (int j = miny; j <= maxy; j++)
                if (childClone->takenMap[i * NUM_CELL + j])
                    return true;

        // pass all the checks, don't need to be cloned
        return false;
    }

    __global__ void updateContextKernel(SocialForceClone *c, int numElem) {
        int idx = threadIdx.x + blockIdx.x * blockDim.x;
        if (idx < numElem) {
            SocialForceAgent *agent = c->ap->agentPtrArray[idx];
            c->context[agent->contextId] = agent;
        }
    }

    __global__ void constructPassiveMap(SocialForceClone *c, int numElem) {
        int idx = threadIdx.x + blockIdx.x * blockDim.x;
        if (idx < numElem) {
            SocialForceAgent &agent = *c->ap->agentPtrArray[idx];
            int takenId = agent.data.loc.x / CELL_DIM;
            takenId = takenId * NUM_CELL + agent.data.loc.y / CELL_DIM;
            c->takenMap[takenId] = true;
        }
    }

    __global__ void performCloningKernel(SocialForceClone *p, SocialForceClone *c, int numCap) {
        int idx = threadIdx.x + blockIdx.x * blockDim.x;
        if (idx < p->numElem) {
            SocialForceAgent *agent = p->ap->agentPtrArray[idx];
            if (cloningCondition(agent, p, c)) {
                uint lastNum = atomicInc(&c->numElem, numCap);
                SocialForceAgent& childAgent = *c->ap->agentPtrArray[lastNum];
                c->ap->takenFlags[lastNum] = true;
                childAgent.initNewClone(agent, c);
                c->context[childAgent.contextId] = &childAgent;
                c->cloneFlags[childAgent.contextId] = true;
                //c->numElem++; /* not written back */
            }
        }
    }

    __global__ void compareAndEliminateKernel(SocialForceClone *p, SocialForceClone *c, int numElem) {
        int idx = threadIdx.x + blockIdx.x * blockDim.x;
        if (idx < numElem) {
            SocialForceAgent &childAgent = *c->ap->agentPtrArray[idx];
            SocialForceAgent &parentAgent = *p->context[childAgent.contextId]; // *(SocialForceAgent*)childAgent.myOrigin;
            double velDiff = length(childAgent.dataCopy.velocity - parentAgent.dataCopy.velocity);
            double locDiff = length(childAgent.dataCopy.loc - parentAgent.dataCopy.loc);
            if (locDiff == 0 && velDiff == 0) {
                c->ap->takenFlags[idx] = false;
                c->cloneFlags[childAgent.contextId] = false;
            }
        }
    }

    template<class T>
    __device__ void swap(T * ar, int a, int b) {
        T t1 = ar[a];
        ar[a] = ar[b];
        ar[b] = t1;
    }

    // compact the taken agents to the front of the agent array (single-thread kernel)
    __global__ void reorderKernel(SocialForceClone *c, int numElem) {
        int idx = threadIdx.x + blockIdx.x * blockDim.x;
        if (idx == 0) {
            int l = 0;
            int r = numElem;
            int i = l, j = l;
            for (; j < r; j++) {
                if (c->ap->takenFlags[j] == true) {
                    swap<SocialForceAgent*>(c->ap->agentPtrArray, i, j);
                    swap<bool>(c->ap->takenFlags, i, j);
                    i++;
                }
            }
            c->numElem = i;
        }
    }
};

void SocialForceSimApp::performClone(SocialForceClone *parentClone, SocialForceClone *childClone) {
    childClone->parentCloneid = parentClone->cloneid;

    // 1. copy the context of parent clone
    cudaMemcpyAsync(childClone->context, parentClone->context, NUM_CAP * sizeof(SocialForceAgent*), cudaMemcpyDeviceToDevice, childClone->myStream);
    cudaStreamSynchronize(childClone->myStream);
    //cudaMemcpy(childClone->context, parentClone->context, NUM_CAP * sizeof(SocialForceAgent*), cudaMemcpyDeviceToDevice);
    getLastCudaError("perform clone");

    // 2. update the context with agents of its own
    if (childClone->numElem > 0) {
        int gSize = GRID_SIZE(childClone->numElem);
        AppUtil::updateContextKernel << <gSize, BLOCK_SIZE, 0, childClone->myStream >> >(childClone->selfDev, childClone->numElem);
        //AppUtil::updateContextKernel << <gSize, BLOCK_SIZE >> >(childClone->selfDev, childClone->numElem);
    }
    getLastCudaError("perform clone");

    // 3. construct passive cloning map
    if (childClone->numElem > 0) {
        cudaMemsetAsync(childClone->selfDev->takenMap, 0, sizeof(bool) * NUM_CELL * NUM_CELL, childClone->myStream);
        cudaStreamSynchronize(childClone->myStream);
        //cudaMemset(childClone->selfDev->takenMap, 0, sizeof(bool) * NUM_CELL * NUM_CELL);
        int gSize = GRID_SIZE(childClone->numElem);
        AppUtil::constructPassiveMap << <gSize, BLOCK_SIZE, 0, childClone->myStream >> >(childClone->selfDev, childClone->numElem);
        //AppUtil::constructPassiveMap << <gSize, BLOCK_SIZE >> >(childClone->selfDev, childClone->numElem);
    }
    getLastCudaError("perform clone");

    // 4. perform active and passive cloning (in cloningCondition checking)
    int gSize = GRID_SIZE(NUM_CAP);
    //AppUtil::performCloningKernel << <gSize, BLOCK_SIZE >> >(parentClone->selfDev, childClone->selfDev, NUM_CAP);
    AppUtil::performCloningKernel << <gSize, BLOCK_SIZE, 0, childClone->myStream >> >(parentClone->selfDev, childClone->selfDev, NUM_CAP);
    cudaMemcpyAsync(childClone, childClone->selfDev, sizeof(SocialForceClone), cudaMemcpyDeviceToHost, childClone->myStream);
    cudaStreamSynchronize(childClone->myStream);
    getLastCudaError("perform clone");
}

void compareAndEliminateCPU(SocialForceClone *parentClone, SocialForceClone *childClone) {
    wchar_t message[20];
    for (int i = 0; i < childClone->numElem; i++) {
        SocialForceAgent &childAgent = *childClone->ap->agentPtrArray[i];
        SocialForceAgent parentAgent; // *(SocialForceAgent*)childAgent.myOrigin;
        if (length(childAgent.dataCopy.velocity - parentAgent.dataCopy.velocity) == 0
            && length(childAgent.dataCopy.loc - parentAgent.dataCopy.loc) == 0) {
            childClone->ap->takenFlags[i] = false;
            childClone->cloneFlags[childAgent.contextId] = false;
        }
        /*else {
            if (childClone->cloneid == 4) {
                swprintf_s(message, 20, L"not false: %d\n", i);
                OutputDebugString(message);
            }
        }*/
    }
    childClone->numElem = childClone->ap->reorder(childClone->numElem);
}

void SocialForceSimApp::compareAndEliminate(SocialForceClone *parentClone, SocialForceClone *childClone) {
    if (childClone->numElem == 0) return;
    int gSize = GRID_SIZE(childClone->numElem);
    AppUtil::compareAndEliminateKernel << <gSize, BLOCK_SIZE, 0, childClone->myStream >> >(parentClone->selfDev, childClone->selfDev, childClone->numElem);
    //AppUtil::compareAndEliminateKernel << <gSize, BLOCK_SIZE>> >(parentClone->selfDev, childClone->selfDev, childClone->numElem);

    gSize = GRID_SIZE(NUM_CAP);
    AppUtil::reorderKernel << <1, 1, 0, childClone->myStream >> >(childClone->selfDev, childClone->numElem);
    //AppUtil::reorderKernel << <1, 1 >> >(childClone->selfDev, childClone->numElem);
    cudaMemcpyAsync(childClone, childClone->selfDev, sizeof(SocialForceClone), cudaMemcpyDeviceToHost, childClone->myStream);
    cudaStreamSynchronize(childClone->myStream);
}

void SocialForceSimApp::proc(int p, int c, bool o, char *s) {
    performClone(cAll[p], cAll[c]);
    cAll[c]->step(stepCount);
    if (o) {
        if (stepCount < 800)
            cAll[c]->output(stepCount, s);
    }
    compareAndEliminate(cAll[p], cAll[c]);
}

void swap(int **cloneTree, int a, int b) {
    int t1 = cloneTree[0][a];
    cloneTree[0][a] = cloneTree[0][b];
    cloneTree[0][b] = t1;
    t1 = cloneTree[1][a];
    cloneTree[1][a] = cloneTree[1][b];
    cloneTree[1][b] = t1;
}

void quickSort(int **cloneTree, int l, int r) {
    if (l == r) return;
    int pi = l + rand() % (r - l);
    swap(cloneTree, l, pi);
    int pivot = cloneTree[0][l];
    int i = l + 1, j = l + 1;
    for (; j < r; j++) {
        if (cloneTree[0][j] < pivot) {
            swap(cloneTree, i, j);
            i++;
        }
    }
    swap(cloneTree, l, i - 1);
    quickSort(cloneTree, l, i - 1);
    quickSort(cloneTree, i, r);
}

void SocialForceSimApp::mst() {
    // clone diff matrix
    int **cloneDiff = new int*[totalClone];
    for (int i = 0; i < totalClone; i++) {
        cloneDiff[i] = new int[totalClone];
        for (int j = 0; j < totalClone; j++)
            cloneDiff[i][j] = 0;
    }

    for (int i = 0; i < totalClone; i++) {
        for (int j = 0; j < totalClone; j++) {
            for (int k = 0; k < NUM_PARAM; k++) {
                if (cAll[i]->cloneParams[k] != cAll[j]->cloneParams[k])
                    cloneDiff[i][j]++;
            }
            wchar_t message[20];
            swprintf_s(message, 20, L"%d ", cloneDiff[i][j]);
            OutputDebugString(message);
        }
        OutputDebugString(L"\n");
    }

    int *parent = cloneTree[0] = new int[totalClone];
    int *child = cloneTree[1] = new int[totalClone];
    int *key = new int[totalClone];
    bool *mstSet = new bool[totalClone];

    for (int i = 0; i < totalClone; i++)
        child[i] = i, key[i] = INT_MAX, mstSet[i] = false;
    key[0] = 0;
    parent[0] = -1;
    child[0] = 0;

    // Prim's algorithm over the clone-difference matrix
    int count = 0;
    while (count++ < totalClone - 1) {
        int minKey = INT_MAX;
        int minIdx;
        for (int j = 0; j < totalClone; j++)
            if (mstSet[j] == false && key[j] < minKey)
                minKey = key[j], minIdx = j;
        mstSet[minIdx] = true;
        for (int j = 0; j < totalClone; j++)
            if (cloneDiff[minIdx][j] && mstSet[j] == false && cloneDiff[minIdx][j] < key[j])
                parent[j] = minIdx, key[j] = cloneDiff[minIdx][j];
    }

    quickSort(cloneTree, 0, totalClone);

    for (int i = 1; i < totalClone; i++) {
        wchar_t message[20];
        swprintf_s(message, 20, L"%d - %d: %d\n", cloneTree[0][i], cloneTree[1][i], cloneDiff[i][parent[i]]);
        OutputDebugString(message);
    }
    delete[] mstSet;
    delete[] key;
}

__global__ void getLocAndColorKernel(SocialForceClone *c, double2 *loc, uchar4 *color, int *contextId, int numElem) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < numElem) {
        loc[idx] = c->context[idx]->data.loc;
        color[idx] = c->context[idx]->color;
        contextId[idx] = c->context[idx]->contextId;
    }
}

void SocialForceSimApp::getLocAndColorFromDevice(){
    SocialForceClone *c = cAll[paintId];
    int gSize = GRID_SIZE(NUM_CAP);
    getLocAndColorKernel << <gSize, BLOCK_SIZE >> >(c->selfDev, debugLocDev, debugColorDev, debugContextIdDev, NUM_CAP);
    cudaMemcpy(debugLocHost, debugLocDev, sizeof(double2) * NUM_CAP, cudaMemcpyDeviceToHost);
    cudaMemcpy(debugColorHost, debugColorDev, sizeof(uchar4) * NUM_CAP, cudaMemcpyDeviceToHost);
    cudaMemcpy(debugContextIdHost, debugContextIdDev, sizeof(int) * NUM_CAP, cudaMemcpyDeviceToHost);
    cudaMemcpy(c->takenMap, c->selfDev->takenMap, sizeof(bool) * NUM_CELL * NUM_CELL, cudaMemcpyDeviceToHost);
    //cudaMemcpy(debugCidStartsHost, c->cidStarts, sizeof(int) * NUM_CELL * NUM_CELL, cudaMemcpyDeviceToHost);
    //cudaMemcpy(debugCidEndsHost, c->cidEnds, sizeof(int) * NUM_CELL * NUM_CELL, cudaMemcpyDeviceToHost);
    //wchar_t message[128];
    //for (int i = 0; i < NUM_CELL * NUM_CELL; i++) {
    //    swprintf_s(message, L"(%d, %d) ", debugCidStartsHost[i], debugCidEndsHost[i]);
    //    OutputDebugString(message);
    //}
    //OutputDebugString(L"\n");
}

__global__ void initRandomKernel(SocialForceClone* c, int numElemLocal) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < numElemLocal) {
        curand_init(1234, idx, 0, &c->rState[idx]);
    }
}

__global__ void initRootCloneKernel(SocialForceClone* c, int numElemLocal) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < numElemLocal) {
        c->ap->agentArray[idx].init(c, idx);
        c->context[idx] = &c->ap->agentArray[idx];
        c->cloneFlags[idx] = false;
    }
    if (idx == 0)
        c->numElem = numElemLocal;
}

void SocialForceSimApp::initRootClone(SocialForceClone* cHost, SocialForceClone* cDev) {
    cHost->numElem = NUM_CAP;
    int gSize = GRID_SIZE(NUM_CAP);
    initRandomKernel << <gSize, BLOCK_SIZE >> >(cDev, NUM_CAP);
    initRootCloneKernel << <gSize, BLOCK_SIZE >> >(cDev, NUM_CAP);
}
c92900aff38270df45190bb996abd7194135c90a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by gxl on 2021/1/5.
//
#include "cc.cuh"

void conventionParticipateCC(string ccPath) {
    cout << "===============conventionParticipateCC==============" << endl;
    uint testNumNodes = 0;
    ulong testNumEdge = 0;
    unsigned long transferSum = 0;
    uint *nodePointersI;
    uint *edgeList;
    auto startReadGraph = std::chrono::steady_clock::now();
    ifstream infile(ccPath, ios::in | ios::binary);
    infile.read((char *) &testNumNodes, sizeof(uint));
    uint numEdge = 0;
    infile.read((char *) &numEdge, sizeof(uint));
    testNumEdge = numEdge;
    cout << "vertex num: " << testNumNodes << " edge num: " << testNumEdge << endl;
    nodePointersI = new uint[testNumNodes];
    infile.read((char *) nodePointersI, sizeof(uint) * testNumNodes);
    edgeList = new uint[testNumEdge];
    infile.read((char *) edgeList, sizeof(uint) * testNumEdge);
    infile.close();
    unsigned long max_partition_size;
    unsigned long total_gpu_size;
    getMaxPartitionSize(max_partition_size, total_gpu_size, testNumNodes, 0.9, sizeof(uint), 5);
    uint partitionNum;
    if (testNumEdge > max_partition_size) {
        partitionNum = testNumEdge / max_partition_size + 1;
    } else {
        partitionNum = 1;
    }
    uint *degree = new uint[testNumNodes];
    uint *value = new uint[testNumNodes];
    bool *isActiveNodeList = new bool[testNumNodes];
    CommonPartitionInfo *partitionInfoList = new CommonPartitionInfo[partitionNum];
    bool *needTransferPartition = new bool[partitionNum];
    for (uint i = 0; i < testNumNodes; i++) {
        isActiveNodeList[i] = true;
        value[i] = i;
        if (i + 1 < testNumNodes) {
            degree[i] = nodePointersI[i + 1] - nodePointersI[i];
        } else {
            degree[i] = testNumEdge - nodePointersI[i];
        }
        if (degree[i] > max_partition_size) {
            cout << "node " << i << " degree > maxPartition " << endl;
            return;
        }
    }
    for (uint i = 0; i < partitionNum; i++) {
        partitionInfoList[i].startVertex = -1;
        partitionInfoList[i].endVertex = -1;
        partitionInfoList[i].nodePointerOffset = -1;
        partitionInfoList[i].partitionEdgeSize = -1;
    }
    int tempPartitionIndex = 0;
    uint tempNodeIndex = 0;
    while (tempNodeIndex < testNumNodes) {
        if (partitionInfoList[tempPartitionIndex].startVertex == -1) {
            partitionInfoList[tempPartitionIndex].startVertex = tempNodeIndex;
            partitionInfoList[tempPartitionIndex].endVertex = tempNodeIndex;
            partitionInfoList[tempPartitionIndex].nodePointerOffset = nodePointersI[tempNodeIndex];
            partitionInfoList[tempPartitionIndex].partitionEdgeSize = degree[tempNodeIndex];
            tempNodeIndex++;
        } else {
            if (partitionInfoList[tempPartitionIndex].partitionEdgeSize + degree[tempNodeIndex] > max_partition_size) {
                tempPartitionIndex++;
            } else {
                partitionInfoList[tempPartitionIndex].endVertex = tempNodeIndex;
                partitionInfoList[tempPartitionIndex].partitionEdgeSize += degree[tempNodeIndex];
                tempNodeIndex++;
            }
        }
    }
    uint *degreeD;
    bool *isActiveNodeListD;
    bool *nextActiveNodeListD;
    uint *nodePointerListD;
    uint *partitionEdgeListD;
    uint *valueD;
    hipMalloc(&degreeD, testNumNodes * sizeof(uint));
    hipMalloc(&valueD, testNumNodes * sizeof(uint));
    hipMalloc(&isActiveNodeListD, testNumNodes * sizeof(bool));
    hipMalloc(&nextActiveNodeListD, testNumNodes * sizeof(bool));
    hipMalloc(&nodePointerListD, testNumNodes * sizeof(uint));
    hipMalloc(&partitionEdgeListD, max_partition_size * sizeof(uint));
    hipMemcpy(degreeD, degree, testNumNodes * sizeof(uint), hipMemcpyHostToDevice);
    hipMemcpy(nodePointerListD, nodePointersI, testNumNodes * sizeof(uint), hipMemcpyHostToDevice);
    hipMemset(nextActiveNodeListD, 0, testNumNodes * sizeof(bool));
    //cacaulate the active node And make
active node array dim3 grid = dim3(56, 1, 1); dim3 block = dim3(1024, 1, 1); int testTimes = 1; long timeSum = 0; for (int i = 0; i < testTimes; i++) { for (int j = 0; j < testNumNodes; j++) { isActiveNodeList[j] = true; value[j] = j; } hipMemcpy(valueD, value, testNumNodes * sizeof(uint), hipMemcpyHostToDevice); uint activeSum = 0; int iteration = 0; auto startProcessing = std::chrono::steady_clock::now(); while (true) { uint activeNodeNum = 0; checkNeedTransferPartitionOpt(needTransferPartition, partitionInfoList, isActiveNodeList, partitionNum, testNumNodes, activeNodeNum); if (activeNodeNum <= 0) { break; } else { cout << "iteration " << iteration << " activeNodes " << activeNodeNum << endl; activeSum += activeNodeNum; } hipMemcpy(isActiveNodeListD, isActiveNodeList, testNumNodes * sizeof(bool), hipMemcpyHostToDevice); for (int j = 0; j < partitionNum; j++) { if (needTransferPartition[j]) { hipMemcpy(partitionEdgeListD, edgeList + partitionInfoList[j].nodePointerOffset, partitionInfoList[j].partitionEdgeSize * sizeof(uint), hipMemcpyHostToDevice); transferSum += partitionInfoList[j].partitionEdgeSize; hipLaunchKernelGGL(( ccKernel_CommonPartition), dim3(grid), dim3(block), 0, 0, partitionInfoList[j].startVertex, partitionInfoList[j].endVertex, partitionInfoList[j].nodePointerOffset, isActiveNodeListD, nodePointerListD, partitionEdgeListD, degreeD, valueD, nextActiveNodeListD); hipDeviceSynchronize(); gpuErrorcheck(hipPeekAtLastError()) } } hipMemcpy(isActiveNodeList, nextActiveNodeListD, testNumNodes * sizeof(bool), hipMemcpyDeviceToHost); hipMemset(nextActiveNodeListD, 0, testNumNodes * sizeof(bool)); iteration++; } cout << "cpu transfer to gpu " << transferSum * sizeof(uint) << "byte" << endl; cout << " activeSum " << activeSum << endl; auto endRead = std::chrono::steady_clock::now(); long durationRead = std::chrono::duration_cast<std::chrono::milliseconds>(endRead - startProcessing).count(); cout << " finish time : " << durationRead << " ms" << endl; } free(nodePointersI); free(edgeList); free(degree); free(isActiveNodeList); hipFree(isActiveNodeListD); hipFree(nextActiveNodeListD); hipFree(nodePointerListD); hipFree(partitionEdgeListD); //todo free partitionInfoList needTransferPartition } int needCpu = 0; int notNeedCpu = 0; long processingTimeSum = 0; long cpuTimeSum = 0; long allTimeSum = 0; long validSwapSum = 0; int trestSum = 0; void ccShare(string ccPath) { uint testNumNodes = 0; ulong testNumEdge = 0; uint *nodePointersI; uint *edgeList; auto startReadGraph = std::chrono::steady_clock::now(); ifstream infile(ccPath, ios::in | ios::binary); infile.read((char *) &testNumNodes, sizeof(uint)); uint numEdge = 0; infile.read((char *) &numEdge, sizeof(uint)); testNumEdge = numEdge; cout << "vertex num: " << testNumNodes << " edge num: " << testNumEdge << endl; gpuErrorcheck(hipMallocManaged(&nodePointersI, (testNumNodes + 1) * sizeof(uint))); infile.read((char *) nodePointersI, sizeof(uint) * testNumNodes); gpuErrorcheck(hipMallocManaged(&edgeList, (numEdge) * sizeof(uint))); hipMemAdvise(nodePointersI, (testNumNodes + 1) * sizeof(uint), hipMemAdviseSetReadMostly, 0); hipMemAdvise(edgeList, (numEdge) * sizeof(uint), hipMemAdviseSetReadMostly, 0); infile.read((char *) edgeList, sizeof(uint) * testNumEdge); infile.close(); //preprocessData(nodePointersI, edgeList, testNumNodes, testNumEdge); auto endReadGraph = std::chrono::steady_clock::now(); long durationReadGraph = std::chrono::duration_cast<std::chrono::milliseconds>( endReadGraph - startReadGraph).count(); cout << "read graph 
time : " << durationReadGraph << "ms" << endl; int testTimes = 1; long timeSum = 0; for (int i = 0; i < testTimes; i++) { timeSum += ccCaculateInShare(testNumNodes, testNumEdge, nodePointersI, edgeList); //timeSum += bfsCaculateInShare(testNumNodes, testNumEdge, nodePointersI, edgeList, 53037907); break; } cout << "need cpu " << needCpu << " not need cpu " << notNeedCpu << endl; cout << "processingTime " << processingTimeSum / testTimes << " cpu time " << cpuTimeSum / testTimes << " all Time " << allTimeSum / testTimes << endl; cout << "mean time is " << timeSum / testTimes << endl; cout << "mean validSwapSum is " << validSwapSum / testTimes << endl; cout << trestSum << endl; } long ccCaculateInShare(uint testNumNodes, uint testNumEdge, uint *nodePointersI, uint *edgeList) { auto start = std::chrono::steady_clock::now(); uint *degree; uint *value; //uint *recordActiveNodes = new uint[testNumNodes]; gpuErrorcheck(hipMallocManaged(&degree, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMallocManaged(&value, testNumNodes * sizeof(uint))); auto startPreCaculate = std::chrono::steady_clock::now(); for (uint i = 0; i < testNumNodes - 1; i++) { degree[i] = nodePointersI[i + 1] - nodePointersI[i]; } degree[testNumNodes - 1] = testNumEdge - nodePointersI[testNumNodes - 1]; bool *label; gpuErrorcheck(hipMallocManaged(&label, testNumNodes * sizeof(bool))); for (uint i = 0; i < testNumNodes; i++) { label[i] = true; value[i] = i; } uint *activeNodeList; hipMallocManaged(&activeNodeList, testNumNodes * sizeof(uint)); //cacaulate the active node And make active node array uint *activeNodeLabelingD; gpuErrorcheck(hipMallocManaged(&activeNodeLabelingD, testNumNodes * sizeof(unsigned int))); uint *activeNodeLabelingPrefixD; gpuErrorcheck(hipMallocManaged(&activeNodeLabelingPrefixD, testNumNodes * sizeof(unsigned int))); dim3 grid = dim3(56, 1, 1); dim3 block = dim3(1024, 1, 1); hipLaunchKernelGGL(( setLabeling), dim3(grid), dim3(block), 0, 0, testNumNodes, label, activeNodeLabelingD); thrust::device_ptr<unsigned int> ptr_labeling(activeNodeLabelingD); thrust::device_ptr<unsigned int> ptr_labeling_prefixsum(activeNodeLabelingPrefixD); uint activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes); int iter = 0; uint nodeSum = activeNodesNum; auto endPreCaculate = std::chrono::steady_clock::now(); long durationPreCaculate = std::chrono::duration_cast<std::chrono::milliseconds>( endPreCaculate - startPreCaculate).count(); cout << "durationPreCaculate time : " << durationPreCaculate << " ms" << endl; auto startProcessing = std::chrono::steady_clock::now(); //vector<vector<uint>> visitRecordByIteration; while (activeNodesNum > 0) { iter++; thrust::exclusive_scan(ptr_labeling, ptr_labeling + testNumNodes, ptr_labeling_prefixsum); hipLaunchKernelGGL(( setActiveNodeArray), dim3(grid), dim3(block), 0, 0, testNumNodes, activeNodeList, label, activeNodeLabelingPrefixD); hipLaunchKernelGGL(( setLabelDefault), dim3(grid), dim3(block), 0, 0, activeNodesNum, activeNodeList, label); hipLaunchKernelGGL(( cc_kernel), dim3(grid), dim3(block), 0, 0, activeNodesNum, activeNodeList, nodePointersI, degree, edgeList, value, label); hipDeviceSynchronize(); //visitRecordByIteration.push_back(countDataByIteration(testNumEdge, testNumNodes, nodePointersI, degree, activeNodeLabelingD)); gpuErrorcheck(hipPeekAtLastError()); hipLaunchKernelGGL(( setLabeling), dim3(grid), dim3(block), 0, 0, testNumNodes, label, activeNodeLabelingD); activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes); nodeSum += 
activeNodesNum; cout << "iter: " << iter << " activeNodes: " << activeNodesNum << endl; } hipDeviceSynchronize(); //writeTrunkVistInIteration(visitRecordByIteration, "./CountByIterationCC.txt"); cout << "nodeSum: " << nodeSum << endl; auto endRead = std::chrono::steady_clock::now(); long durationRead = std::chrono::duration_cast<std::chrono::milliseconds>(endRead - startProcessing).count(); cout << "iter sum is " << iter << " finish time : " << durationRead << " ms" << endl; hipFree(degree); hipFree(label); hipFree(value); hipFree(activeNodeList); hipFree(activeNodeLabelingD); hipFree(activeNodeLabelingPrefixD); return durationRead; } void ccKernelThread(uint staticNodeNum, uint *activeNodeListD, uint *staticNodePointerD, uint *degreeD, uint *staticEdgeListD, uint *valueD, uint *isActiveD1, uint *isActiveD2, bool *isFinishedManaged, dim3 grid, dim3 block, hipStream_t steamStatic) { uint itr = 0; bool isFinishedHost = true; do { itr++; isFinishedHost = true; hipMemcpy(isFinishedManaged, &isFinishedHost, sizeof(bool), hipMemcpyHostToDevice); hipLaunchKernelGGL(( cc_kernelStaticSwapOpt2Label), dim3(grid), dim3(block), 0, steamStatic, staticNodeNum, activeNodeListD, staticNodePointerD, degreeD, staticEdgeListD, valueD, itr % 2 == 1 ? isActiveD1 : isActiveD2, itr % 2 == 1 ? isActiveD2 : isActiveD1, isFinishedManaged); hipDeviceSynchronize(); hipMemcpy(&isFinishedHost, isFinishedManaged, sizeof(bool), hipMemcpyDeviceToHost); isFinishedHost = true; } while (!isFinishedHost); } void ccOpt(string ccPath, float adviseK) { uint testNumNodes = 0; ulong testNumEdge = 0; uint *nodePointersI; uint *edgeList; bool isUseShare = true; auto startReadGraph = std::chrono::steady_clock::now(); ifstream infile(ccPath, ios::in | ios::binary); infile.read((char *) &testNumNodes, sizeof(uint)); uint numEdge = 0; infile.read((char *) &numEdge, sizeof(uint)); testNumEdge = numEdge; cout << "vertex num: " << testNumNodes << " edge num: " << testNumEdge << endl; nodePointersI = new uint[testNumNodes + 1]; infile.read((char *) nodePointersI, sizeof(uint) * testNumNodes); edgeList = new uint[testNumEdge + 1]; infile.read((char *) edgeList, sizeof(uint) * testNumEdge); infile.close(); auto endReadGraph = std::chrono::steady_clock::now(); long durationReadGraph = std::chrono::duration_cast<std::chrono::milliseconds>( endReadGraph - startReadGraph).count(); cout << "read graph time : " << durationReadGraph << "ms" << endl; int testTimes = 1; long timeSum = 0; for (int i = 0; i < testTimes; i++) { //timeSum += ccCaculateCommonMemoryInnerAsync(testNumNodes, testNumEdge, nodePointersI, edgeList, adviseK); //break; timeSum += ccCaculateCommonMemoryInnerAsyncRandom(testNumNodes, testNumEdge, nodePointersI, edgeList, adviseK); cout << i << "========================================" << endl; } } struct TempConnectedComponent { uint index; uint nodeSum; uint edgeSum; }; long ccCaculateCommonMemoryInnerAsync(uint testNumNodes, uint testNumEdge, uint *nodePointersI, uint *edgeList, float adviseK) { cout << "=========ccCaculateCommonMemoryInnerAsync1========" << endl; ulong edgeIterationMax = 0; auto start = std::chrono::steady_clock::now(); auto startPreCaculate = std::chrono::steady_clock::now(); //CPU long durationRead; ulong transferSum = 0; unsigned long max_partition_size; unsigned long total_gpu_size; uint maxStaticNode = 0; uint *degree; uint *value; uint *label; bool *isInStatic; uint *overloadNodeList; uint *staticNodePointer; uint *activeNodeList; uint *activeOverloadNodePointers; vector<PartEdgeListInfo> 
partEdgeListInfoArr; /* * overloadEdgeList overload edge list in every iteration * */ uint *overloadEdgeList; FragmentData *fragmentData; bool isFromTail = true; //GPU uint *staticEdgeListD; uint *overloadEdgeListD; bool *isInStaticD; uint *overloadNodeListD; uint *staticNodePointerD; uint *nodePointerD; uint *degreeD; // async need two labels uint *isActiveD1; uint *isActiveD2; uint *isStaticActive; uint *isOverloadActive; uint *valueD; uint *activeNodeListD; uint *activeNodeLabelingPrefixD; uint *activeOverloadNodePointersD; uint *activeOverloadDegreeD; bool *isFinishedDevice; degree = new uint[testNumNodes]; value = new uint[testNumNodes]; label = new uint[testNumNodes]; isInStatic = new bool[testNumNodes]; overloadNodeList = new uint[testNumNodes]; staticNodePointer = new uint[testNumNodes]; activeNodeList = new uint[testNumNodes]; activeOverloadNodePointers = new uint[testNumNodes]; getMaxPartitionSize(max_partition_size, total_gpu_size, testNumNodes, adviseK, sizeof(uint), testNumEdge, 15); //caculate degree uint meanDegree = testNumEdge / testNumNodes; cout << " meanDegree " << meanDegree << endl; uint degree0Sum = 0; for (uint i = 0; i < testNumNodes - 1; i++) { if (nodePointersI[i] > testNumEdge) { cout << i << " " << nodePointersI[i] << endl; break; } degree[i] = nodePointersI[i + 1] - nodePointersI[i]; } degree[testNumNodes - 1] = testNumEdge - nodePointersI[testNumNodes - 1]; memcpy(staticNodePointer, nodePointersI, testNumNodes * sizeof(uint)); //caculate static staticEdgeListD gpuErrorcheck(hipMalloc(&isFinishedDevice, 1 * sizeof(bool))); gpuErrorcheck(hipMalloc(&staticEdgeListD, max_partition_size * sizeof(uint))); auto startmove = std::chrono::steady_clock::now(); gpuErrorcheck(hipMemcpy(staticEdgeListD, edgeList, max_partition_size * sizeof(uint), hipMemcpyHostToDevice)); auto endMove = std::chrono::steady_clock::now(); long testDuration = std::chrono::duration_cast<std::chrono::milliseconds>( endMove - startmove).count(); gpuErrorcheck(hipMalloc(&isInStaticD, testNumNodes * sizeof(bool))) gpuErrorcheck(hipMalloc(&overloadNodeListD, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMalloc(&staticNodePointerD, testNumNodes * sizeof(uint))) gpuErrorcheck(hipMemcpy(staticNodePointerD, nodePointersI, testNumNodes * sizeof(uint), hipMemcpyHostToDevice)); gpuErrorcheck(hipMalloc(&nodePointerD, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMemcpy(nodePointerD, nodePointersI, testNumNodes * sizeof(uint), hipMemcpyHostToDevice)); for (uint i = 0; i < testNumNodes; i++) { label[i] = 1; value[i] = i; if (nodePointersI[i] < max_partition_size && (nodePointersI[i] + degree[i] - 1) < max_partition_size) { isInStatic[i] = true; if (i > maxStaticNode) maxStaticNode = i; } else { isInStatic[i] = false; } } hipMemcpy(isInStaticD, isInStatic, testNumNodes * sizeof(bool), hipMemcpyHostToDevice); cout << "max_partition_size: " << max_partition_size << " maxStaticNode: " << maxStaticNode << endl; uint partOverloadSize = total_gpu_size - max_partition_size; uint overloadSize = testNumEdge - nodePointersI[maxStaticNode + 1]; cout << " partOverloadSize " << partOverloadSize << " overloadSize " << overloadSize << endl; overloadEdgeList = (uint *) malloc(overloadSize * sizeof(uint)); gpuErrorcheck(hipMalloc(&overloadEdgeListD, partOverloadSize * sizeof(uint))); gpuErrorcheck(hipMalloc(&degreeD, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMalloc(&isActiveD1, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMalloc(&isActiveD2, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMalloc(&isStaticActive, 
testNumNodes * sizeof(uint))); gpuErrorcheck(hipMalloc(&isOverloadActive, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMalloc(&valueD, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMalloc(&activeNodeLabelingPrefixD, testNumNodes * sizeof(unsigned int))); gpuErrorcheck(hipMalloc(&activeNodeListD, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMalloc(&activeOverloadNodePointersD, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMalloc(&activeOverloadDegreeD, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMemcpy(degreeD, degree, testNumNodes * sizeof(uint), hipMemcpyHostToDevice)); gpuErrorcheck(hipMemcpy(valueD, value, testNumNodes * sizeof(uint), hipMemcpyHostToDevice)); gpuErrorcheck(hipMemcpy(isActiveD1, label, testNumNodes * sizeof(uint), hipMemcpyHostToDevice)); gpuErrorcheck(hipMemset(isActiveD2, 0, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMemset(isStaticActive, 0, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMemset(isOverloadActive, 0, testNumNodes * sizeof(uint))); //cacaulate the active node And make active node array dim3 grid = dim3(56, 1, 1); dim3 block = dim3(1024, 1, 1); //setLabeling<<<grid, block>>>(testNumNodes, labelD, activeNodeLabelingD); thrust::device_ptr<unsigned int> ptr_labeling(isActiveD1); thrust::device_ptr<unsigned int> ptr_labelingTest(isActiveD2); thrust::device_ptr<unsigned int> ptr_labeling_static(isStaticActive); thrust::device_ptr<unsigned int> ptr_labeling_overload(isOverloadActive); thrust::device_ptr<unsigned int> ptr_labeling_prefixsum(activeNodeLabelingPrefixD); thrust::device_ptr<unsigned int> ptrOverloadDegree(activeOverloadDegreeD); thrust::device_ptr<unsigned int> ptrOverloadPrefixsum(activeOverloadNodePointersD); uint activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes); int iter = 0; uint nodeSum = activeNodesNum; ulong overloadEdgeSum = 0; auto startCpu = std::chrono::steady_clock::now(); auto endReadCpu = std::chrono::steady_clock::now(); long durationReadCpu = 0; auto startSwap = std::chrono::steady_clock::now(); auto endSwap = std::chrono::steady_clock::now(); long durationSwap = 0; auto startGpuProcessing = std::chrono::steady_clock::now(); auto endGpuProcessing = std::chrono::steady_clock::now(); long durationGpuProcessing = 0; auto startOverloadGpuProcessing = std::chrono::steady_clock::now(); auto endOverloadGpuProcessing = std::chrono::steady_clock::now(); long durationOverloadGpuProcessing = 0; auto startPreGpuProcessing = std::chrono::steady_clock::now(); auto endPreGpuProcessing = std::chrono::steady_clock::now(); long durationPreGpuProcessing = 0; auto endPreCaculate = std::chrono::steady_clock::now(); long durationPreCaculate = std::chrono::duration_cast<std::chrono::milliseconds>( endPreCaculate - startPreCaculate).count(); cout << "durationPreCaculate time : " << durationPreCaculate << " ms" << endl; hipStream_t steamStatic, streamDynamic; hipStreamCreate(&steamStatic); hipStreamCreate(&streamDynamic); auto startMemoryTraverse = std::chrono::steady_clock::now(); auto endMemoryTraverse = std::chrono::steady_clock::now(); long durationMemoryTraverse = 0; auto startProcessing = std::chrono::steady_clock::now(); //uint cursorStartSwap = staticFragmentNum + 1; uint swapValidNodeSum = 0; uint swapValidEdgeSum = 0; uint swapNotValidNodeSum = 0; uint swapNotValidEdgeSum = 0; uint visitEdgeSum = 0; uint swapInEdgeSum = 0; uint headSum; uint tailSum; while (activeNodesNum > 0) { iter++; //cout << "iter " << iter << " activeNodesNum " << activeNodesNum << endl; startPreGpuProcessing = 
std::chrono::steady_clock::now(); //cleanStaticAndOverloadLabel<<<grid, block>>>(testNumNodes, isStaticActive, isOverloadActive); hipLaunchKernelGGL(( setStaticAndOverloadLabel), dim3(grid), dim3(block), 0, 0, testNumNodes, isActiveD1, isStaticActive, isOverloadActive, isInStaticD); uint staticNodeNum = thrust::reduce(ptr_labeling_static, ptr_labeling_static + testNumNodes); if (staticNodeNum > 0) { //cout << "iter " << iter << " staticNodeNum " << staticNodeNum << endl; thrust::exclusive_scan(ptr_labeling_static, ptr_labeling_static + testNumNodes, ptr_labeling_prefixsum); hipLaunchKernelGGL(( setStaticActiveNodeArray), dim3(grid), dim3(block), 0, 0, testNumNodes, activeNodeListD, isStaticActive, activeNodeLabelingPrefixD); } uint overloadNodeNum = thrust::reduce(ptr_labeling_overload, ptr_labeling_overload + testNumNodes); uint overloadEdgeNum = 0; if (overloadNodeNum > 0) { //cout << "iter " << iter << " overloadNodeNum " << overloadNodeNum << endl; thrust::exclusive_scan(ptr_labeling_overload, ptr_labeling_overload + testNumNodes, ptr_labeling_prefixsum); hipLaunchKernelGGL(( setOverloadNodePointerSwap), dim3(grid), dim3(block), 0, 0, testNumNodes, overloadNodeListD, activeOverloadDegreeD, isOverloadActive, activeNodeLabelingPrefixD, degreeD); thrust::exclusive_scan(ptrOverloadDegree, ptrOverloadDegree + overloadNodeNum, activeOverloadNodePointersD); overloadEdgeNum = thrust::reduce(thrust::device, ptrOverloadDegree, ptrOverloadDegree + overloadNodeNum, 0); //cout << "iter " << iter << " overloadEdgeNum " << overloadEdgeNum << endl; overloadEdgeSum += overloadEdgeNum; if (overloadEdgeNum > edgeIterationMax) { edgeIterationMax = overloadEdgeNum; } } endPreGpuProcessing = std::chrono::steady_clock::now(); durationPreGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>( endPreGpuProcessing - startPreGpuProcessing).count(); startGpuProcessing = std::chrono::steady_clock::now(); hipLaunchKernelGGL(( mixDynamicPartLabel), dim3(grid), dim3(block), 0, steamStatic, staticNodeNum, 0, activeNodeListD, isActiveD1, isActiveD2); thread staticCCKernel = thread(ccKernelThread, staticNodeNum, activeNodeListD, staticNodePointerD, degreeD, staticEdgeListD, valueD, isActiveD1, isActiveD2, isFinishedDevice, grid, block, steamStatic); /*if (staticCCKernel.joinable()) { staticCCKernel.join(); }*/ if (overloadNodeNum > 0) { startCpu = std::chrono::steady_clock::now(); /*hipMemcpyAsync(activeNodeList, activeNodeListD, activeNodesNum * sizeof(uint), hipMemcpyDeviceToHost, streamDynamic);*/ hipMemcpyAsync(overloadNodeList, overloadNodeListD, overloadNodeNum * sizeof(uint), hipMemcpyDeviceToHost, streamDynamic); hipMemcpyAsync(activeOverloadNodePointers, activeOverloadNodePointersD, overloadNodeNum * sizeof(uint), hipMemcpyDeviceToHost, streamDynamic); int threadNum = 20; if (overloadNodeNum < 50) { threadNum = 1; } thread runThreads[threadNum]; for (int i = 0; i < threadNum; i++) { runThreads[i] = thread(fillDynamic, i, threadNum, 0, overloadNodeNum, degree, activeOverloadNodePointers, nodePointersI, overloadNodeList, overloadEdgeList, edgeList); } for (unsigned int t = 0; t < threadNum; t++) { runThreads[t].join(); } caculatePartInfoForEdgeList(activeOverloadNodePointers, overloadNodeList, degree, partEdgeListInfoArr, overloadNodeNum, partOverloadSize, overloadEdgeNum); endReadCpu = std::chrono::steady_clock::now(); durationReadCpu += std::chrono::duration_cast<std::chrono::milliseconds>(endReadCpu - startCpu).count(); if (staticCCKernel.joinable()) { staticCCKernel.join(); } endGpuProcessing 
= std::chrono::steady_clock::now(); durationGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>( endGpuProcessing - startGpuProcessing).count(); for (auto &i : partEdgeListInfoArr) { startMemoryTraverse = std::chrono::steady_clock::now(); gpuErrorcheck(hipMemcpy(overloadEdgeListD, overloadEdgeList + activeOverloadNodePointers[i.partStartIndex], i.partEdgeNums * sizeof(uint), hipMemcpyHostToDevice)) transferSum += i.partEdgeNums; endMemoryTraverse = std::chrono::steady_clock::now(); durationMemoryTraverse += std::chrono::duration_cast<std::chrono::milliseconds>( endMemoryTraverse - startMemoryTraverse).count(); /*cout << "iter " << iter << " part " << i << " durationMemoryTraverse " << durationMemoryTraverse << endl;*/ startOverloadGpuProcessing = std::chrono::steady_clock::now(); hipLaunchKernelGGL(( mixDynamicPartLabel), dim3(grid), dim3(block), 0, streamDynamic, i.partActiveNodeNums, i.partStartIndex, overloadNodeListD, isActiveD1, isActiveD2); uint itr = 0; bool isFinishedHost = true; do { itr++; isFinishedHost = true; hipMemcpy(isFinishedDevice, &isFinishedHost, sizeof(bool), hipMemcpyHostToDevice); hipLaunchKernelGGL(( cc_kernelDynamicSwap2Label), dim3(grid), dim3(block), 0, streamDynamic, i.partStartIndex, i.partActiveNodeNums, overloadNodeListD, degreeD, valueD, itr % 2 == 1 ? isActiveD1 : isActiveD2, itr % 2 == 1 ? isActiveD2 : isActiveD1, overloadEdgeListD, activeOverloadNodePointersD, isFinishedDevice); hipDeviceSynchronize(); hipMemcpy(&isFinishedHost, isFinishedDevice, sizeof(bool), hipMemcpyDeviceToHost); isFinishedHost = true; } while (!isFinishedHost); endOverloadGpuProcessing = std::chrono::steady_clock::now(); durationOverloadGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>( endOverloadGpuProcessing - startOverloadGpuProcessing).count(); /*cout << "iter " << iter << " part " << i << " durationOverloadGpuProcessing " << durationOverloadGpuProcessing << endl;*/ } gpuErrorcheck(hipPeekAtLastError()) } else { if (staticCCKernel.joinable()) { staticCCKernel.join(); } endGpuProcessing = std::chrono::steady_clock::now(); durationGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>( endGpuProcessing - startGpuProcessing).count(); } hipLaunchKernelGGL(( mixCommonLabel), dim3(grid), dim3(block), 0, streamDynamic, testNumNodes, isActiveD1, isActiveD2); //hipDeviceSynchronize(); //cout << "mixDynamicPartLabel" << " =========hipDeviceSynchronize()==========" << endl; //hipMemcpy(label, isActiveD, testNumNodes * sizeof(uint), hipMemcpyDeviceToHost); startPreGpuProcessing = std::chrono::steady_clock::now(); activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes); nodeSum += activeNodesNum; endPreGpuProcessing = std::chrono::steady_clock::now(); durationPreGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>( endPreGpuProcessing - startPreGpuProcessing).count(); } hipDeviceSynchronize(); hipMemcpy(value, valueD, testNumNodes * sizeof(uint), hipMemcpyDeviceToHost); transferSum += max_partition_size; cout << "transferSum: " << transferSum * 4 << "byte" << endl; cout << "iterationSum " << iter << endl; double edgeIterationAvg = (double) overloadEdgeSum / (double) testNumEdge / iter; double edgeIterationMaxAvg = (double) edgeIterationMax / (double) testNumEdge; cout << "edgeIterationAvg " << edgeIterationAvg << " edgeIterationMaxAvg " << edgeIterationMaxAvg << endl; cout << "nodeSum: " << nodeSum << endl; auto endRead = std::chrono::steady_clock::now(); durationRead = 
std::chrono::duration_cast<std::chrono::milliseconds>(endRead - startProcessing).count(); cout << "finish time : " << durationRead << " ms" << endl; cout << "total time : " << durationRead + testDuration << " ms" << endl; cout << "cpu time : " << durationReadCpu << " ms" << endl; cout << "pre fact processing time : " << durationGpuProcessing << " ms" << endl; cout << "overload fact processing time : " << durationOverloadGpuProcessing << " ms" << endl; cout << "durationMemoryTraverse : " << durationMemoryTraverse << " ms" << endl; cout << "durationOverloadGpuProcessing : " << durationOverloadGpuProcessing << " ms" << endl; cout << "gpu pre processing time : " << durationPreGpuProcessing << " ms" << endl; cout << "swap processing time : " << durationSwap << " ms" << endl; cout << "overloadEdgeSum : " << overloadEdgeSum << " " << endl; cout << "swapValidNodeSum " << swapValidNodeSum << " swapValidEdgeSum " << swapValidEdgeSum << endl; cout << "swapNotValidNodeSum " << swapNotValidNodeSum << " swapNotValidEdgeSum " << swapNotValidEdgeSum << " visitSum " << visitEdgeSum << " swapInEdgeSum " << swapInEdgeSum << endl; cout << "headSum " << headSum << " tailSum " << tailSum << endl; /*hipFree(nodePointerD); hipFree(staticEdgeListD); hipFree(degreeD); hipFree(isActiveD1); hipFree(isActiveD2); hipFree(valueD); hipFree(activeNodeListD); hipFree(activeNodeLabelingPrefixD); hipFree(activeOverloadNodePointersD); hipFree(activeOverloadDegreeD); hipFree(isInStaticD); hipFree(staticNodePointerD); hipFree(overloadNodeListD); delete[] label; delete[] degree; delete[] value; delete[] activeNodeList; delete[] activeOverloadNodePointers; delete[] isInStatic; delete[] overloadNodeList; delete[] staticNodePointer; delete[] fragmentData; return durationRead;*/ } void conventionParticipateCCInLong() { cout << "===============conventionParticipateCCInLong==============" << endl; uint testNumNodes = 0; ulong testNumEdge = 0; unsigned long transferSum = 0; uint *nodePointersI; uint *edgeList; auto startReadGraph = std::chrono::steady_clock::now(); ifstream infile(testGraphPath, ios::in | ios::binary); infile.read((char *) &testNumNodes, sizeof(uint)); uint numEdge = 0; infile.read((char *) &numEdge, sizeof(uint)); testNumEdge = numEdge; cout << "vertex num: " << testNumNodes << " edge num: " << testNumEdge << endl; nodePointersI = new uint[testNumNodes]; infile.read((char *) nodePointersI, sizeof(uint) * testNumNodes); edgeList = new uint[testNumEdge]; infile.read((char *) edgeList, sizeof(uint) * testNumEdge); infile.close(); unsigned long max_partition_size; unsigned long total_gpu_size; getMaxPartitionSize(max_partition_size, total_gpu_size, testNumNodes, 0.9, sizeof(uint), 5); uint partitionNum; if (testNumEdge > max_partition_size) { partitionNum = testNumEdge / max_partition_size + 1; } else { partitionNum = 1; } uint *degree = new uint[testNumNodes]; uint *value = new uint[testNumNodes]; bool *isActiveNodeList = new bool[testNumNodes]; CommonPartitionInfo *partitionInfoList = new CommonPartitionInfo[partitionNum]; bool *needTransferPartition = new bool[partitionNum]; for (uint i = 0; i < testNumNodes; i++) { isActiveNodeList[i] = true; value[i] = i; if (i + 1 < testNumNodes) { degree[i] = nodePointersI[i + 1] - nodePointersI[i]; } else { degree[i] = testNumEdge - nodePointersI[i]; } if (degree[i] > max_partition_size) { cout << "node " << i << " degree > maxPartition " << endl; return; } } for (uint i = 0; i < partitionNum; i++) { partitionInfoList[i].startVertex = -1; partitionInfoList[i].endVertex = -1; 
partitionInfoList[i].nodePointerOffset = -1; partitionInfoList[i].partitionEdgeSize = -1; } int tempPartitionIndex = 0; uint tempNodeIndex = 0; while (tempNodeIndex < testNumNodes) { if (partitionInfoList[tempPartitionIndex].startVertex == -1) { partitionInfoList[tempPartitionIndex].startVertex = tempNodeIndex; partitionInfoList[tempPartitionIndex].endVertex = tempNodeIndex; partitionInfoList[tempPartitionIndex].nodePointerOffset = nodePointersI[tempNodeIndex]; partitionInfoList[tempPartitionIndex].partitionEdgeSize = degree[tempNodeIndex]; tempNodeIndex++; } else { if (partitionInfoList[tempPartitionIndex].partitionEdgeSize + degree[tempNodeIndex] > max_partition_size) { tempPartitionIndex++; } else { partitionInfoList[tempPartitionIndex].endVertex = tempNodeIndex; partitionInfoList[tempPartitionIndex].partitionEdgeSize += degree[tempNodeIndex]; tempNodeIndex++; } } } uint *degreeD; bool *isActiveNodeListD; bool *nextActiveNodeListD; uint *nodePointerListD; uint *partitionEdgeListD; uint *valueD; hipMalloc(&degreeD, testNumNodes * sizeof(uint)); hipMalloc(&valueD, testNumNodes * sizeof(uint)); hipMalloc(&isActiveNodeListD, testNumNodes * sizeof(bool)); hipMalloc(&nextActiveNodeListD, testNumNodes * sizeof(bool)); hipMalloc(&nodePointerListD, testNumNodes * sizeof(uint)); hipMalloc(&partitionEdgeListD, max_partition_size * sizeof(uint)); hipMemcpy(degreeD, degree, testNumNodes * sizeof(uint), hipMemcpyHostToDevice); hipMemcpy(nodePointerListD, nodePointersI, testNumNodes * sizeof(uint), hipMemcpyHostToDevice); hipMemset(nextActiveNodeListD, 0, testNumNodes * sizeof(bool)); //cacaulate the active node And make active node array dim3 grid = dim3(56, 1, 1); dim3 block = dim3(1024, 1, 1); int testTimes = 1; long timeSum = 0; for (int i = 0; i < testTimes; i++) { for (int j = 0; j < testNumNodes; j++) { isActiveNodeList[j] = true; value[j] = j; } hipMemcpy(valueD, value, testNumNodes * sizeof(uint), hipMemcpyHostToDevice); uint activeSum = 0; int iteration = 0; auto startProcessing = std::chrono::steady_clock::now(); while (true) { uint activeNodeNum = 0; checkNeedTransferPartition(needTransferPartition, partitionInfoList, isActiveNodeList, partitionNum, testNumNodes, activeNodeNum); if (activeNodeNum <= 0) { break; } else { cout << "iteration " << iteration << " activeNodes " << activeNodeNum << endl; activeSum += activeNodeNum; } hipMemcpy(isActiveNodeListD, isActiveNodeList, testNumNodes * sizeof(bool), hipMemcpyHostToDevice); for (int j = 0; j < partitionNum; j++) { if (needTransferPartition[j]) { hipMemcpy(partitionEdgeListD, edgeList + partitionInfoList[j].nodePointerOffset, partitionInfoList[j].partitionEdgeSize * sizeof(uint), hipMemcpyHostToDevice); transferSum += partitionInfoList[j].partitionEdgeSize; hipLaunchKernelGGL(( ccKernel_CommonPartition), dim3(grid), dim3(block), 0, 0, partitionInfoList[j].startVertex, partitionInfoList[j].endVertex, partitionInfoList[j].nodePointerOffset, isActiveNodeListD, nodePointerListD, partitionEdgeListD, degreeD, valueD, nextActiveNodeListD); hipDeviceSynchronize(); gpuErrorcheck(hipPeekAtLastError()) } } hipMemcpy(isActiveNodeList, nextActiveNodeListD, testNumNodes * sizeof(bool), hipMemcpyDeviceToHost); hipMemset(nextActiveNodeListD, 0, testNumNodes * sizeof(bool)); iteration++; } cout << "cpu transfer to gpu " << transferSum * sizeof(uint) << "byte" << endl; cout << " activeSum " << activeSum << endl; auto endRead = std::chrono::steady_clock::now(); long durationRead = std::chrono::duration_cast<std::chrono::milliseconds>(endRead - 
startProcessing).count(); cout << " finish time : " << durationRead << " ms" << endl; } free(nodePointersI); free(edgeList); free(degree); free(isActiveNodeList); hipFree(isActiveNodeListD); hipFree(nextActiveNodeListD); hipFree(nodePointerListD); hipFree(partitionEdgeListD); } long ccCaculateCommonMemoryInnerAsyncRecordVisit(uint testNumNodes, uint testNumEdge, uint *nodePointersI, uint *edgeList, float adviseK) { cout << "=========ccCaculateCommonMemoryInnerAsync1========" << endl; ulong edgeIterationMax = 0; auto start = std::chrono::steady_clock::now(); auto startPreCaculate = std::chrono::steady_clock::now(); //CPU long durationRead; ulong transferSum = 0; unsigned long max_partition_size; unsigned long total_gpu_size; uint maxStaticNode = 0; uint *degree; uint *value; uint *label; bool *isInStatic; uint *overloadNodeList; uint *staticNodePointer; uint *activeNodeList; uint *activeOverloadNodePointers; vector<PartEdgeListInfo> partEdgeListInfoArr; /* * overloadEdgeList overload edge list in every iteration * */ uint *overloadEdgeList; FragmentData *fragmentData; bool isFromTail = true; //GPU uint *staticEdgeListD; uint *overloadEdgeListD; bool *isInStaticD; uint *overloadNodeListD; uint *staticNodePointerD; uint *nodePointerD; uint *degreeD; // async need two labels uint *isActiveD1; uint *isActiveD2; uint *isStaticActive; uint *isOverloadActive; uint *valueD; uint *activeNodeListD; uint *activeNodeLabelingPrefixD; uint *activeOverloadNodePointersD; uint *activeOverloadDegreeD; bool *isFinishedDevice; uint *vertexVisitRecord; uint *vertexVisitRecordD; vertexVisitRecord = new uint[testNumNodes]; hipMalloc(&vertexVisitRecordD, testNumNodes * sizeof(uint)); hipMemset(vertexVisitRecordD, 0, testNumNodes * sizeof(uint)); degree = new uint[testNumNodes]; value = new uint[testNumNodes]; label = new uint[testNumNodes]; isInStatic = new bool[testNumNodes]; overloadNodeList = new uint[testNumNodes]; staticNodePointer = new uint[testNumNodes]; activeNodeList = new uint[testNumNodes]; activeOverloadNodePointers = new uint[testNumNodes]; getMaxPartitionSize(max_partition_size, total_gpu_size, testNumNodes, adviseK, sizeof(uint), testNumEdge, 15); //caculate degree uint meanDegree = testNumEdge / testNumNodes; cout << " meanDegree " << meanDegree << endl; uint degree0Sum = 0; for (uint i = 0; i < testNumNodes - 1; i++) { if (nodePointersI[i] > testNumEdge) { cout << i << " " << nodePointersI[i] << endl; break; } degree[i] = nodePointersI[i + 1] - nodePointersI[i]; } degree[testNumNodes - 1] = testNumEdge - nodePointersI[testNumNodes - 1]; memcpy(staticNodePointer, nodePointersI, testNumNodes * sizeof(uint)); //caculate static staticEdgeListD gpuErrorcheck(hipMalloc(&isFinishedDevice, 1 * sizeof(bool))); gpuErrorcheck(hipMalloc(&staticEdgeListD, max_partition_size * sizeof(uint))); auto startmove = std::chrono::steady_clock::now(); gpuErrorcheck(hipMemcpy(staticEdgeListD, edgeList, max_partition_size * sizeof(uint), hipMemcpyHostToDevice)); auto endMove = std::chrono::steady_clock::now(); long testDuration = std::chrono::duration_cast<std::chrono::milliseconds>( endMove - startmove).count(); gpuErrorcheck(hipMalloc(&isInStaticD, testNumNodes * sizeof(bool))) gpuErrorcheck(hipMalloc(&overloadNodeListD, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMalloc(&staticNodePointerD, testNumNodes * sizeof(uint))) gpuErrorcheck(hipMemcpy(staticNodePointerD, nodePointersI, testNumNodes * sizeof(uint), hipMemcpyHostToDevice)); gpuErrorcheck(hipMalloc(&nodePointerD, testNumNodes * sizeof(uint))); 
gpuErrorcheck(hipMemcpy(nodePointerD, nodePointersI, testNumNodes * sizeof(uint), hipMemcpyHostToDevice)); for (uint i = 0; i < testNumNodes; i++) { label[i] = 1; value[i] = i; if (nodePointersI[i] < max_partition_size && (nodePointersI[i] + degree[i] - 1) < max_partition_size) { isInStatic[i] = true; if (i > maxStaticNode) maxStaticNode = i; } else { isInStatic[i] = false; } } hipMemcpy(isInStaticD, isInStatic, testNumNodes * sizeof(bool), hipMemcpyHostToDevice); cout << "max_partition_size: " << max_partition_size << " maxStaticNode: " << maxStaticNode << endl; uint partOverloadSize = total_gpu_size - max_partition_size; uint overloadSize = testNumEdge - nodePointersI[maxStaticNode + 1]; cout << " partOverloadSize " << partOverloadSize << " overloadSize " << overloadSize << endl; overloadEdgeList = (uint *) malloc(overloadSize * sizeof(uint)); gpuErrorcheck(hipMalloc(&overloadEdgeListD, partOverloadSize * sizeof(uint))); gpuErrorcheck(hipMalloc(&degreeD, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMalloc(&isActiveD1, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMalloc(&isActiveD2, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMalloc(&isStaticActive, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMalloc(&isOverloadActive, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMalloc(&valueD, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMalloc(&activeNodeLabelingPrefixD, testNumNodes * sizeof(unsigned int))); gpuErrorcheck(hipMalloc(&activeNodeListD, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMalloc(&activeOverloadNodePointersD, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMalloc(&activeOverloadDegreeD, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMemcpy(degreeD, degree, testNumNodes * sizeof(uint), hipMemcpyHostToDevice)); gpuErrorcheck(hipMemcpy(valueD, value, testNumNodes * sizeof(uint), hipMemcpyHostToDevice)); gpuErrorcheck(hipMemcpy(isActiveD1, label, testNumNodes * sizeof(uint), hipMemcpyHostToDevice)); gpuErrorcheck(hipMemset(isActiveD2, 0, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMemset(isStaticActive, 0, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMemset(isOverloadActive, 0, testNumNodes * sizeof(uint))); //cacaulate the active node And make active node array dim3 grid = dim3(56, 1, 1); dim3 block = dim3(1024, 1, 1); //setLabeling<<<grid, block>>>(testNumNodes, labelD, activeNodeLabelingD); thrust::device_ptr<unsigned int> ptr_labeling(isActiveD1); thrust::device_ptr<unsigned int> ptr_labelingTest(isActiveD2); thrust::device_ptr<unsigned int> ptr_labeling_static(isStaticActive); thrust::device_ptr<unsigned int> ptr_labeling_overload(isOverloadActive); thrust::device_ptr<unsigned int> ptr_labeling_prefixsum(activeNodeLabelingPrefixD); thrust::device_ptr<unsigned int> ptrOverloadDegree(activeOverloadDegreeD); thrust::device_ptr<unsigned int> ptrOverloadPrefixsum(activeOverloadNodePointersD); uint activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes); int iter = 0; uint nodeSum = activeNodesNum; ulong overloadEdgeSum = 0; auto startCpu = std::chrono::steady_clock::now(); auto endReadCpu = std::chrono::steady_clock::now(); long durationReadCpu = 0; auto startSwap = std::chrono::steady_clock::now(); auto endSwap = std::chrono::steady_clock::now(); long durationSwap = 0; auto startGpuProcessing = std::chrono::steady_clock::now(); auto endGpuProcessing = std::chrono::steady_clock::now(); long durationGpuProcessing = 0; auto startOverloadGpuProcessing = std::chrono::steady_clock::now(); auto endOverloadGpuProcessing = 
std::chrono::steady_clock::now(); long durationOverloadGpuProcessing = 0; auto startPreGpuProcessing = std::chrono::steady_clock::now(); auto endPreGpuProcessing = std::chrono::steady_clock::now(); long durationPreGpuProcessing = 0; auto endPreCaculate = std::chrono::steady_clock::now(); long durationPreCaculate = std::chrono::duration_cast<std::chrono::milliseconds>( endPreCaculate - startPreCaculate).count(); cout << "durationPreCaculate time : " << durationPreCaculate << " ms" << endl; hipStream_t steamStatic, streamDynamic; hipStreamCreate(&steamStatic); hipStreamCreate(&streamDynamic); auto startMemoryTraverse = std::chrono::steady_clock::now(); auto endMemoryTraverse = std::chrono::steady_clock::now(); long durationMemoryTraverse = 0; auto startProcessing = std::chrono::steady_clock::now(); //uint cursorStartSwap = staticFragmentNum + 1; uint swapValidNodeSum = 0; uint swapValidEdgeSum = 0; uint swapNotValidNodeSum = 0; uint swapNotValidEdgeSum = 0; uint visitEdgeSum = 0; uint swapInEdgeSum = 0; uint headSum; uint tailSum; while (activeNodesNum > 0) { iter++; //cout << "iter " << iter << " activeNodesNum " << activeNodesNum << endl; startPreGpuProcessing = std::chrono::steady_clock::now(); //cleanStaticAndOverloadLabel<<<grid, block>>>(testNumNodes, isStaticActive, isOverloadActive); hipLaunchKernelGGL(( setStaticAndOverloadLabelAndRecord), dim3(grid), dim3(block), 0, 0, testNumNodes, isActiveD1, isStaticActive, isOverloadActive, isInStaticD, vertexVisitRecordD); uint staticNodeNum = thrust::reduce(ptr_labeling_static, ptr_labeling_static + testNumNodes); if (staticNodeNum > 0) { //cout << "iter " << iter << " staticNodeNum " << staticNodeNum << endl; thrust::exclusive_scan(ptr_labeling_static, ptr_labeling_static + testNumNodes, ptr_labeling_prefixsum); hipLaunchKernelGGL(( setStaticActiveNodeArray), dim3(grid), dim3(block), 0, 0, testNumNodes, activeNodeListD, isStaticActive, activeNodeLabelingPrefixD); } uint overloadNodeNum = thrust::reduce(ptr_labeling_overload, ptr_labeling_overload + testNumNodes); uint overloadEdgeNum = 0; if (overloadNodeNum > 0) { //cout << "iter " << iter << " overloadNodeNum " << overloadNodeNum << endl; thrust::exclusive_scan(ptr_labeling_overload, ptr_labeling_overload + testNumNodes, ptr_labeling_prefixsum); hipLaunchKernelGGL(( setOverloadNodePointerSwap), dim3(grid), dim3(block), 0, 0, testNumNodes, overloadNodeListD, activeOverloadDegreeD, isOverloadActive, activeNodeLabelingPrefixD, degreeD); thrust::exclusive_scan(ptrOverloadDegree, ptrOverloadDegree + overloadNodeNum, activeOverloadNodePointersD); overloadEdgeNum = thrust::reduce(thrust::device, ptrOverloadDegree, ptrOverloadDegree + overloadNodeNum, 0); //cout << "iter " << iter << " overloadEdgeNum " << overloadEdgeNum << endl; overloadEdgeSum += overloadEdgeNum; if (overloadEdgeNum > edgeIterationMax) { edgeIterationMax = overloadEdgeNum; } } endPreGpuProcessing = std::chrono::steady_clock::now(); durationPreGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>( endPreGpuProcessing - startPreGpuProcessing).count(); startGpuProcessing = std::chrono::steady_clock::now(); hipLaunchKernelGGL(( mixDynamicPartLabel), dim3(grid), dim3(block), 0, steamStatic, staticNodeNum, 0, activeNodeListD, isActiveD1, isActiveD2); thread staticCCKernel = thread(ccKernelThread, staticNodeNum, activeNodeListD, staticNodePointerD, degreeD, staticEdgeListD, valueD, isActiveD1, isActiveD2, isFinishedDevice, grid, block, steamStatic); if (staticCCKernel.joinable()) { staticCCKernel.join(); } if 
(overloadNodeNum > 0) { startCpu = std::chrono::steady_clock::now(); /*hipMemcpyAsync(activeNodeList, activeNodeListD, activeNodesNum * sizeof(uint), hipMemcpyDeviceToHost, streamDynamic);*/ hipMemcpyAsync(overloadNodeList, overloadNodeListD, overloadNodeNum * sizeof(uint), hipMemcpyDeviceToHost, streamDynamic); hipMemcpyAsync(activeOverloadNodePointers, activeOverloadNodePointersD, overloadNodeNum * sizeof(uint), hipMemcpyDeviceToHost, streamDynamic); int threadNum = 20; if (overloadNodeNum < 50) { threadNum = 1; } thread runThreads[threadNum]; for (int i = 0; i < threadNum; i++) { runThreads[i] = thread(fillDynamic, i, threadNum, 0, overloadNodeNum, degree, activeOverloadNodePointers, nodePointersI, overloadNodeList, overloadEdgeList, edgeList); } for (unsigned int t = 0; t < threadNum; t++) { runThreads[t].join(); } caculatePartInfoForEdgeList(activeOverloadNodePointers, overloadNodeList, degree, partEdgeListInfoArr, overloadNodeNum, partOverloadSize, overloadEdgeNum); endReadCpu = std::chrono::steady_clock::now(); durationReadCpu += std::chrono::duration_cast<std::chrono::milliseconds>(endReadCpu - startCpu).count(); if (staticCCKernel.joinable()) { staticCCKernel.join(); } endGpuProcessing = std::chrono::steady_clock::now(); durationGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>( endGpuProcessing - startGpuProcessing).count(); for (int i = 0; i < partEdgeListInfoArr.size(); i++) { startMemoryTraverse = std::chrono::steady_clock::now(); gpuErrorcheck(hipMemcpy(overloadEdgeListD, overloadEdgeList + activeOverloadNodePointers[partEdgeListInfoArr[i].partStartIndex], partEdgeListInfoArr[i].partEdgeNums * sizeof(uint), hipMemcpyHostToDevice)) transferSum += partEdgeListInfoArr[i].partEdgeNums; endMemoryTraverse = std::chrono::steady_clock::now(); durationMemoryTraverse += std::chrono::duration_cast<std::chrono::milliseconds>( endMemoryTraverse - startMemoryTraverse).count(); /*cout << "iter " << iter << " part " << i << " durationMemoryTraverse " << durationMemoryTraverse << endl;*/ startOverloadGpuProcessing = std::chrono::steady_clock::now(); hipLaunchKernelGGL(( mixDynamicPartLabel), dim3(grid), dim3(block), 0, streamDynamic, partEdgeListInfoArr[i].partActiveNodeNums, partEdgeListInfoArr[i].partStartIndex, overloadNodeListD, isActiveD1, isActiveD2); uint itr = 0; bool isFinishedHost = true; do { itr++; isFinishedHost = true; hipMemcpy(isFinishedDevice, &isFinishedHost, sizeof(bool), hipMemcpyHostToDevice); hipLaunchKernelGGL(( cc_kernelDynamicSwap2Label), dim3(grid), dim3(block), 0, streamDynamic, partEdgeListInfoArr[i].partStartIndex, partEdgeListInfoArr[i].partActiveNodeNums, overloadNodeListD, degreeD, valueD, itr % 2 == 1 ? isActiveD1 : isActiveD2, itr % 2 == 1 ? 
isActiveD2 : isActiveD1, overloadEdgeListD, activeOverloadNodePointersD, isFinishedDevice); hipDeviceSynchronize(); hipMemcpy(&isFinishedHost, isFinishedDevice, sizeof(bool), hipMemcpyDeviceToHost); isFinishedHost = true; } while (!isFinishedHost); endOverloadGpuProcessing = std::chrono::steady_clock::now(); durationOverloadGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>( endOverloadGpuProcessing - startOverloadGpuProcessing).count(); /*cout << "iter " << iter << " part " << i << " durationOverloadGpuProcessing " << durationOverloadGpuProcessing << endl;*/ } gpuErrorcheck(hipPeekAtLastError()) } else { if (staticCCKernel.joinable()) { staticCCKernel.join(); } endGpuProcessing = std::chrono::steady_clock::now(); durationGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>( endGpuProcessing - startGpuProcessing).count(); } hipLaunchKernelGGL(( mixCommonLabel), dim3(grid), dim3(block), 0, streamDynamic, testNumNodes, isActiveD1, isActiveD2); //hipDeviceSynchronize(); //cout << "mixDynamicPartLabel" << " =========hipDeviceSynchronize()==========" << endl; //hipMemcpy(label, isActiveD, testNumNodes * sizeof(uint), hipMemcpyDeviceToHost); startPreGpuProcessing = std::chrono::steady_clock::now(); activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes); nodeSum += activeNodesNum; endPreGpuProcessing = std::chrono::steady_clock::now(); durationPreGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>( endPreGpuProcessing - startPreGpuProcessing).count(); } hipDeviceSynchronize(); hipMemcpy(value, valueD, testNumNodes * sizeof(uint), hipMemcpyDeviceToHost); hipMemcpy(vertexVisitRecord, vertexVisitRecordD, testNumNodes * sizeof(uint), hipMemcpyDeviceToHost); uint partNum = 50; uint partSize = testNumEdge / partNum; vector<uint> partVistRecordList(partNum + 1); uint partSizeCursor = 0; for (uint i = 0; i < testNumNodes; i++) { uint edgeStartIndex = nodePointersI[i]; uint edgeEndIndex = nodePointersI[i] + degree[i]; uint maxPartIndex = partSizeCursor * partSize + partSize; if (edgeStartIndex < maxPartIndex && edgeEndIndex < maxPartIndex) { partVistRecordList[partSizeCursor] += vertexVisitRecord[i] * degree[i]; } else if (edgeStartIndex < maxPartIndex && edgeEndIndex >= maxPartIndex) { partVistRecordList[partSizeCursor] += vertexVisitRecord[i] * (maxPartIndex - edgeStartIndex); partSizeCursor += 1; partVistRecordList[partSizeCursor] += vertexVisitRecord[i] * (edgeEndIndex - maxPartIndex); } else { partSizeCursor += 1; partVistRecordList[partSizeCursor] += vertexVisitRecord[i] * degree[i]; } } for (uint i = 0; i < partNum + 1; i++) { cout << "part " << i << " is " << partVistRecordList[i] << endl; } for (uint i = 0; i < partNum + 1; i++) { cout << partVistRecordList[i] << "\t"; } transferSum += max_partition_size; cout << "transferSum: " << transferSum * 4 << "byte" << endl; cout << "iterationSum " << iter << endl; double edgeIterationAvg = (double) overloadEdgeSum / (double) testNumEdge / iter; double edgeIterationMaxAvg = (double) edgeIterationMax / (double) testNumEdge; cout << "edgeIterationAvg " << edgeIterationAvg << " edgeIterationMaxAvg " << edgeIterationMaxAvg << endl; cout << "nodeSum: " << nodeSum << endl; auto endRead = std::chrono::steady_clock::now(); durationRead = std::chrono::duration_cast<std::chrono::milliseconds>(endRead - startProcessing).count(); cout << "finish time : " << durationRead << " ms" << endl; cout << "total time : " << durationRead + testDuration << " ms" << endl; cout << "cpu time : " << 
durationReadCpu << " ms" << endl; cout << "pre fact processing time : " << durationGpuProcessing << " ms" << endl; cout << "overload fact processing time : " << durationOverloadGpuProcessing << " ms" << endl; cout << "durationMemoryTraverse : " << durationMemoryTraverse << " ms" << endl; cout << "durationOverloadGpuProcessing : " << durationOverloadGpuProcessing << " ms" << endl; cout << "gpu pre processing time : " << durationPreGpuProcessing << " ms" << endl; cout << "swap processing time : " << durationSwap << " ms" << endl; cout << "overloadEdgeSum : " << overloadEdgeSum << " " << endl; cout << "swapValidNodeSum " << swapValidNodeSum << " swapValidEdgeSum " << swapValidEdgeSum << endl; cout << "swapNotValidNodeSum " << swapNotValidNodeSum << " swapNotValidEdgeSum " << swapNotValidEdgeSum << " visitSum " << visitEdgeSum << " swapInEdgeSum " << swapInEdgeSum << endl; cout << "headSum " << headSum << " tailSum " << tailSum << endl; /*hipFree(nodePointerD); hipFree(staticEdgeListD); hipFree(degreeD); hipFree(isActiveD1); hipFree(isActiveD2); hipFree(valueD); hipFree(activeNodeListD); hipFree(activeNodeLabelingPrefixD); hipFree(activeOverloadNodePointersD); hipFree(activeOverloadDegreeD); hipFree(isInStaticD); hipFree(staticNodePointerD); hipFree(overloadNodeListD); delete[] label; delete[] degree; delete[] value; delete[] activeNodeList; delete[] activeOverloadNodePointers; delete[] isInStatic; delete[] overloadNodeList; delete[] staticNodePointer; delete[] fragmentData; return durationRead;*/ } void ccShareTrace(string ccPath) { uint testNumNodes = 0; ulong testNumEdge = 0; uint *nodePointersI; uint *edgeList; auto startReadGraph = std::chrono::steady_clock::now(); ifstream infile(ccPath, ios::in | ios::binary); infile.read((char *) &testNumNodes, sizeof(uint)); uint numEdge = 0; infile.read((char *) &numEdge, sizeof(uint)); testNumEdge = numEdge; cout << "vertex num: " << testNumNodes << " edge num: " << testNumEdge << endl; nodePointersI = new uint[testNumNodes]; infile.read((char *) nodePointersI, sizeof(uint) * testNumNodes); gpuErrorcheck(hipMallocManaged(&edgeList, (numEdge) * sizeof(uint))); hipMemAdvise(nodePointersI, (testNumNodes + 1) * sizeof(uint), hipMemAdviseSetReadMostly, 0); hipMemAdvise(edgeList, (numEdge) * sizeof(uint), hipMemAdviseSetReadMostly, 0); infile.read((char *) edgeList, sizeof(uint) * testNumEdge); infile.close(); //preprocessData(nodePointersI, edgeList, testNumNodes, testNumEdge); auto endReadGraph = std::chrono::steady_clock::now(); long durationReadGraph = std::chrono::duration_cast<std::chrono::milliseconds>( endReadGraph - startReadGraph).count(); cout << "read graph time : " << durationReadGraph << "ms" << endl; int testTimes = 1; long timeSum = 0; for (int i = 0; i < testTimes; i++) { timeSum += ccCaculateInShareTrace(testNumNodes, testNumEdge, nodePointersI, edgeList); //timeSum += bfsCaculateInShare(testNumNodes, testNumEdge, nodePointersI, edgeList, 53037907); break; } cout << "need cpu " << needCpu << " not need cpu " << notNeedCpu << endl; cout << "processingTime " << processingTimeSum / testTimes << " cpu time " << cpuTimeSum / testTimes << " all Time " << allTimeSum / testTimes << endl; cout << "mean time is " << timeSum / testTimes << endl; cout << "mean validSwapSum is " << validSwapSum / testTimes << endl; cout << trestSum << endl; } long ccCaculateInShareTrace(uint testNumNodes, uint testNumEdge, uint *nodePointersI, uint *edgeList) { auto start = std::chrono::steady_clock::now(); uint *degree = new uint[testNumNodes]; uint *value = new 
uint[testNumNodes]; uint sourceCode = 0; auto startPreCaculate = std::chrono::steady_clock::now(); for (uint i = 0; i < testNumNodes - 1; i++) { degree[i] = nodePointersI[i + 1] - nodePointersI[i]; } degree[testNumNodes - 1] = testNumEdge - nodePointersI[testNumNodes - 1]; bool *label = new bool[testNumNodes]; for (uint i = 0; i < testNumNodes; i++) { label[i] = true; value[i] = i; } label[sourceCode] = true; value[sourceCode] = 1; uint *activeNodeListD; uint *degreeD; uint *valueD; bool *labelD; uint *nodePointersD; hipMalloc(&activeNodeListD, testNumNodes * sizeof(uint)); hipMalloc(&nodePointersD, testNumNodes * sizeof(uint)); hipMalloc(&degreeD, testNumNodes * sizeof(uint)); hipMalloc(&valueD, testNumNodes * sizeof(uint)); hipMalloc(&labelD, testNumNodes * sizeof(bool)); hipMemcpy(degreeD, degree, testNumNodes * sizeof(uint), hipMemcpyHostToDevice); hipMemcpy(valueD, value, testNumNodes * sizeof(uint), hipMemcpyHostToDevice); hipMemcpy(labelD, label, testNumNodes * sizeof(bool), hipMemcpyHostToDevice); hipMemcpy(nodePointersD, nodePointersI, testNumNodes * sizeof(uint), hipMemcpyHostToDevice); //cacaulate the active node And make active node array uint *activeNodeLabelingD; gpuErrorcheck(hipMalloc(&activeNodeLabelingD, testNumNodes * sizeof(unsigned int))); uint *activeNodeLabelingPrefixD; gpuErrorcheck(hipMalloc(&activeNodeLabelingPrefixD, testNumNodes * sizeof(unsigned int))); dim3 grid = dim3(56, 1, 1); dim3 block = dim3(1024, 1, 1); auto endPreCaculate = std::chrono::steady_clock::now(); long durationPreCaculate = std::chrono::duration_cast<std::chrono::milliseconds>( endPreCaculate - startPreCaculate).count(); cout << "durationPreCaculate time : " << durationPreCaculate << " ms" << endl; hipLaunchKernelGGL(( setLabeling), dim3(grid), dim3(block), 0, 0, testNumNodes, labelD, activeNodeLabelingD); thrust::device_ptr<unsigned int> ptr_labeling(activeNodeLabelingD); thrust::device_ptr<unsigned int> ptr_labeling_prefixsum(activeNodeLabelingPrefixD); uint activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes); int iter = 0; uint nodeSum = activeNodesNum; cout << "durationPreCaculate time : " << durationPreCaculate << " ms" << endl; auto startProcessing = std::chrono::steady_clock::now(); while (activeNodesNum > 0) { iter++; thrust::exclusive_scan(ptr_labeling, ptr_labeling + testNumNodes, ptr_labeling_prefixsum); hipLaunchKernelGGL(( setActiveNodeArray), dim3(grid), dim3(block), 0, 0, testNumNodes, activeNodeListD, labelD, activeNodeLabelingPrefixD); hipLaunchKernelGGL(( setLabelDefault), dim3(grid), dim3(block), 0, 0, activeNodesNum, activeNodeListD, labelD); hipLaunchKernelGGL(( cc_kernel), dim3(grid), dim3(block), 0, 0, activeNodesNum, activeNodeListD, nodePointersD, degreeD, edgeList, valueD, labelD); hipDeviceSynchronize(); gpuErrorcheck(hipPeekAtLastError()); for (uint j = 0; j < testNumEdge; j++) { uint temp = edgeList[j]; if (temp >= 0) { uint a = temp + 1; } } hipLaunchKernelGGL(( setLabeling), dim3(grid), dim3(block), 0, 0, testNumNodes, labelD, activeNodeLabelingD); activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes); nodeSum += activeNodesNum; cout << "iter: " << iter << " activeNodes: " << activeNodesNum << endl; } hipDeviceSynchronize(); cout << "nodeSum: " << nodeSum << endl; auto endRead = std::chrono::steady_clock::now(); long durationRead = std::chrono::duration_cast<std::chrono::milliseconds>(endRead - startProcessing).count(); cout << "iter sum is " << iter << " finish time : " << durationRead << " ms" << endl; return 
durationRead; } long ccCaculateCommonMemoryInnerAsyncRandom(uint testNumNodes, uint testNumEdge, uint *nodePointersI, uint *edgeList, float adviseK) { cout << "=========ccCaculateCommonMemoryInnerAsync1========" << endl; ulong edgeIterationMax = 0; auto start = std::chrono::steady_clock::now(); auto startPreCaculate = std::chrono::steady_clock::now(); //CPU long durationRead; ulong transferSum = 0; unsigned long max_partition_size; unsigned long total_gpu_size; uint maxStaticNode = 0; uint *degree; uint *value; uint *label; bool *isInStatic; uint *overloadNodeList; uint *staticNodePointer; uint *activeNodeList; uint *activeOverloadNodePointers; vector<PartEdgeListInfo> partEdgeListInfoArr; /* * overloadEdgeList overload edge list in every iteration * */ uint *overloadEdgeList; FragmentData *fragmentData; bool isFromTail = true; //GPU uint *staticEdgeListD; uint *overloadEdgeListD; bool *isInStaticD; uint *overloadNodeListD; uint *staticNodePointerD; uint *nodePointerD; uint *degreeD; // async need two labels uint *isActiveD1; uint *isActiveD2; uint *isStaticActive; uint *isOverloadActive; uint *valueD; uint *activeNodeListD; uint *activeNodeLabelingPrefixD; uint *activeOverloadNodePointersD; uint *activeOverloadDegreeD; bool *isFinishedDevice; degree = new uint[testNumNodes]; value = new uint[testNumNodes]; label = new uint[testNumNodes]; isInStatic = new bool[testNumNodes]; overloadNodeList = new uint[testNumNodes]; staticNodePointer = new uint[testNumNodes]; activeNodeList = new uint[testNumNodes]; activeOverloadNodePointers = new uint[testNumNodes]; getMaxPartitionSize(max_partition_size, total_gpu_size, testNumNodes, adviseK, sizeof(uint), testNumEdge, 15); gpuErrorcheck(hipMalloc(&isFinishedDevice, 1 * sizeof(bool))); //caculate degree calculateDegree(testNumNodes, nodePointersI, testNumEdge, degree); //memcpy(staticNodePointer, nodePointersI, testNumNodes * sizeof(uint)); uint edgesInStatic = 0; float startRate = (1 - (float) max_partition_size / (float) testNumEdge) / 2; uint startIndex = (float) testNumNodes * startRate; uint tempStaticSum = 0; /*for (uint i = testNumNodes - 1; i >= 0; i--) { tempStaticSum += degree[i]; if (tempStaticSum > max_partition_size) { startIndex = i; break; } }*/ //startIndex = 0; if (nodePointersI[startIndex] + max_partition_size > testNumEdge) { startIndex = (float) testNumNodes * 0.1f; } for (uint i = 0; i < testNumNodes; i++) { label[i] = 1; value[i] = i; if (i >= startIndex && nodePointersI[i] < nodePointersI[startIndex] + max_partition_size - degree[i]) { isInStatic[i] = true; staticNodePointer[i] = nodePointersI[i] - nodePointersI[startIndex]; if (i > maxStaticNode) { maxStaticNode = i; } edgesInStatic += degree[i]; } else { isInStatic[i] = false; } } gpuErrorcheck(hipMalloc(&staticEdgeListD, max_partition_size * sizeof(uint))); auto startmove = std::chrono::steady_clock::now(); gpuErrorcheck( hipMemcpy(staticEdgeListD, edgeList + nodePointersI[startIndex], max_partition_size * sizeof(uint), hipMemcpyHostToDevice)); auto endMove = std::chrono::steady_clock::now(); long testDuration = std::chrono::duration_cast<std::chrono::milliseconds>( endMove - startmove).count(); cout << "move duration " << testDuration << endl; gpuErrorcheck(hipMalloc(&isInStaticD, testNumNodes * sizeof(bool))) gpuErrorcheck(hipMalloc(&overloadNodeListD, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMalloc(&staticNodePointerD, testNumNodes * sizeof(uint))) gpuErrorcheck( hipMemcpy(staticNodePointerD, staticNodePointer, testNumNodes * sizeof(uint), hipMemcpyHostToDevice)); 
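// Static/overload split used below: vertices whose whole adjacency list falls inside the
// resident window [nodePointersI[startIndex], nodePointersI[startIndex] + max_partition_size)
// were flagged in isInStatic above and read their edges from staticEdgeListD on the GPU;
// every other ("overload") vertex has its edges gathered on the host and streamed in per
// iteration. The flags are mirrored to the device next so the kernels can route each active
// vertex to either the static or the overload path.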
hipMemcpy(isInStaticD, isInStatic, testNumNodes * sizeof(bool), hipMemcpyHostToDevice); uint partOverloadSize = total_gpu_size - max_partition_size; uint overloadSize = testNumEdge - edgesInStatic; cout << " partOverloadSize " << partOverloadSize << " overloadSize " << overloadSize << endl; overloadEdgeList = (uint *) malloc(overloadSize * sizeof(uint)); gpuErrorcheck(hipMalloc(&overloadEdgeListD, partOverloadSize * sizeof(uint))); gpuErrorcheck(hipMalloc(&degreeD, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMalloc(&isActiveD1, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMalloc(&isActiveD2, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMalloc(&isStaticActive, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMalloc(&isOverloadActive, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMalloc(&valueD, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMalloc(&activeNodeLabelingPrefixD, testNumNodes * sizeof(unsigned int))); gpuErrorcheck(hipMalloc(&activeNodeListD, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMalloc(&activeOverloadNodePointersD, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMalloc(&activeOverloadDegreeD, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMemcpy(degreeD, degree, testNumNodes * sizeof(uint), hipMemcpyHostToDevice)); gpuErrorcheck(hipMemcpy(valueD, value, testNumNodes * sizeof(uint), hipMemcpyHostToDevice)); gpuErrorcheck(hipMemcpy(isActiveD1, label, testNumNodes * sizeof(uint), hipMemcpyHostToDevice)); gpuErrorcheck(hipMemset(isActiveD2, 0, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMemset(isStaticActive, 0, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMemset(isOverloadActive, 0, testNumNodes * sizeof(uint))); //cacaulate the active node And make active node array dim3 grid = dim3(56, 1, 1); dim3 block = dim3(1024, 1, 1); //setLabeling<<<grid, block>>>(testNumNodes, labelD, activeNodeLabelingD); thrust::device_ptr<unsigned int> ptr_labeling(isActiveD1); thrust::device_ptr<unsigned int> ptr_labelingTest(isActiveD2); thrust::device_ptr<unsigned int> ptr_labeling_static(isStaticActive); thrust::device_ptr<unsigned int> ptr_labeling_overload(isOverloadActive); thrust::device_ptr<unsigned int> ptr_labeling_prefixsum(activeNodeLabelingPrefixD); thrust::device_ptr<unsigned int> ptrOverloadDegree(activeOverloadDegreeD); thrust::device_ptr<unsigned int> ptrOverloadPrefixsum(activeOverloadNodePointersD); uint activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes); int iter = 0; uint nodeSum = activeNodesNum; ulong overloadEdgeSum = 0; auto startCpu = std::chrono::steady_clock::now(); auto endReadCpu = std::chrono::steady_clock::now(); long durationReadCpu = 0; auto startSwap = std::chrono::steady_clock::now(); auto endSwap = std::chrono::steady_clock::now(); long durationSwap = 0; auto startGpuProcessing = std::chrono::steady_clock::now(); auto endGpuProcessing = std::chrono::steady_clock::now(); long durationGpuProcessing = 0; auto startOverloadGpuProcessing = std::chrono::steady_clock::now(); auto endOverloadGpuProcessing = std::chrono::steady_clock::now(); long durationOverloadGpuProcessing = 0; auto startPreGpuProcessing = std::chrono::steady_clock::now(); auto endPreGpuProcessing = std::chrono::steady_clock::now(); long durationPreGpuProcessing = 0; auto endPreCaculate = std::chrono::steady_clock::now(); long durationPreCaculate = std::chrono::duration_cast<std::chrono::milliseconds>( endPreCaculate - startPreCaculate).count(); cout << "durationPreCaculate time : " << durationPreCaculate << " ms" << endl; hipStream_t steamStatic, 
streamDynamic; hipStreamCreate(&steamStatic); hipStreamCreate(&streamDynamic); auto startMemoryTraverse = std::chrono::steady_clock::now(); auto endMemoryTraverse = std::chrono::steady_clock::now(); long durationMemoryTraverse = 0; //uint cursorStartSwap = staticFragmentNum + 1; uint swapValidNodeSum = 0; uint swapValidEdgeSum = 0; uint swapNotValidNodeSum = 0; uint swapNotValidEdgeSum = 0; uint visitEdgeSum = 0; uint swapInEdgeSum = 0; uint headSum; uint tailSum; long TIME = 0; int testTimes = 10; for (int testIndex = 0; testIndex < testTimes; testIndex++) { for (uint i = 0; i < testNumNodes; i++) { label[i] = 1; value[i] = i; } hipMemcpy(isInStaticD, isInStatic, testNumNodes * sizeof(bool), hipMemcpyHostToDevice); gpuErrorcheck(hipMemcpy(valueD, value, testNumNodes * sizeof(uint), hipMemcpyHostToDevice)); gpuErrorcheck(hipMemcpy(isActiveD1, label, testNumNodes * sizeof(uint), hipMemcpyHostToDevice)); gpuErrorcheck(hipMemset(isActiveD2, 0, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMemset(isStaticActive, 0, testNumNodes * sizeof(uint))); gpuErrorcheck(hipMemset(isOverloadActive, 0, testNumNodes * sizeof(uint))); activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes); iter = 0; auto startProcessing = std::chrono::steady_clock::now(); auto startTest = std::chrono::steady_clock::now(); auto endTest = std::chrono::steady_clock::now(); long durationTest = 0; while (activeNodesNum > 0) { iter++; //cout << "iter " << iter << " activeNodesNum " << activeNodesNum << endl; startPreGpuProcessing = std::chrono::steady_clock::now(); //cleanStaticAndOverloadLabel<<<grid, block>>>(testNumNodes, isStaticActive, isOverloadActive); hipLaunchKernelGGL(( setStaticAndOverloadLabel), dim3(grid), dim3(block), 0, 0, testNumNodes, isActiveD1, isStaticActive, isOverloadActive, isInStaticD); uint staticNodeNum = thrust::reduce(ptr_labeling_static, ptr_labeling_static + testNumNodes); if (staticNodeNum > 0) { //cout << "iter " << iter << " staticNodeNum " << staticNodeNum << endl; thrust::exclusive_scan(ptr_labeling_static, ptr_labeling_static + testNumNodes, ptr_labeling_prefixsum); hipLaunchKernelGGL(( setStaticActiveNodeArray), dim3(grid), dim3(block), 0, 0, testNumNodes, activeNodeListD, isStaticActive, activeNodeLabelingPrefixD); } uint overloadNodeNum = thrust::reduce(ptr_labeling_overload, ptr_labeling_overload + testNumNodes); uint overloadEdgeNum = 0; if (overloadNodeNum > 0) { //cout << "iter " << iter << " overloadNodeNum " << overloadNodeNum << endl; thrust::exclusive_scan(ptr_labeling_overload, ptr_labeling_overload + testNumNodes, ptr_labeling_prefixsum); hipLaunchKernelGGL(( setOverloadNodePointerSwap), dim3(grid), dim3(block), 0, 0, testNumNodes, overloadNodeListD, activeOverloadDegreeD, isOverloadActive, activeNodeLabelingPrefixD, degreeD); thrust::exclusive_scan(ptrOverloadDegree, ptrOverloadDegree + overloadNodeNum, activeOverloadNodePointersD); overloadEdgeNum = thrust::reduce(thrust::device, ptrOverloadDegree, ptrOverloadDegree + overloadNodeNum, 0); //cout << "iter " << iter << " overloadEdgeNum " << overloadEdgeNum << endl; overloadEdgeSum += overloadEdgeNum; if (overloadEdgeNum > edgeIterationMax) { edgeIterationMax = overloadEdgeNum; } } endPreGpuProcessing = std::chrono::steady_clock::now(); durationPreGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>( endPreGpuProcessing - startPreGpuProcessing).count(); startGpuProcessing = std::chrono::steady_clock::now(); hipLaunchKernelGGL(( mixDynamicPartLabel), dim3(grid), dim3(block), 0, steamStatic, 
staticNodeNum, 0, activeNodeListD, isActiveD1, isActiveD2); thread staticCCKernel = thread(ccKernelThread, staticNodeNum, activeNodeListD, staticNodePointerD, degreeD, staticEdgeListD, valueD, isActiveD1, isActiveD2, isFinishedDevice, grid, block, steamStatic); /*if (staticCCKernel.joinable()) { staticCCKernel.join(); }*/ if (overloadNodeNum > 0) { startCpu = std::chrono::steady_clock::now(); /*hipMemcpyAsync(activeNodeList, activeNodeListD, activeNodesNum * sizeof(uint), hipMemcpyDeviceToHost, streamDynamic);*/ hipMemcpyAsync(overloadNodeList, overloadNodeListD, overloadNodeNum * sizeof(uint), hipMemcpyDeviceToHost, streamDynamic); hipMemcpyAsync(activeOverloadNodePointers, activeOverloadNodePointersD, overloadNodeNum * sizeof(uint), hipMemcpyDeviceToHost, streamDynamic); int threadNum = 20; if (overloadNodeNum < 50) { threadNum = 1; } thread runThreads[threadNum]; for (int i = 0; i < threadNum; i++) { runThreads[i] = thread(fillDynamic, i, threadNum, 0, overloadNodeNum, degree, activeOverloadNodePointers, nodePointersI, overloadNodeList, overloadEdgeList, edgeList); } for (unsigned int t = 0; t < threadNum; t++) { runThreads[t].join(); } caculatePartInfoForEdgeList(activeOverloadNodePointers, overloadNodeList, degree, partEdgeListInfoArr, overloadNodeNum, partOverloadSize, overloadEdgeNum); endReadCpu = std::chrono::steady_clock::now(); durationReadCpu += std::chrono::duration_cast<std::chrono::milliseconds>(endReadCpu - startCpu).count(); if (staticCCKernel.joinable()) { staticCCKernel.join(); } endGpuProcessing = std::chrono::steady_clock::now(); durationGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>( endGpuProcessing - startGpuProcessing).count(); for (auto &i : partEdgeListInfoArr) { startMemoryTraverse = std::chrono::steady_clock::now(); gpuErrorcheck(hipMemcpy(overloadEdgeListD, overloadEdgeList + activeOverloadNodePointers[i.partStartIndex], i.partEdgeNums * sizeof(uint), hipMemcpyHostToDevice)) transferSum += i.partEdgeNums; endMemoryTraverse = std::chrono::steady_clock::now(); durationMemoryTraverse += std::chrono::duration_cast<std::chrono::milliseconds>( endMemoryTraverse - startMemoryTraverse).count(); /*cout << "iter " << iter << " part " << i << " durationMemoryTraverse " << durationMemoryTraverse << endl;*/ startOverloadGpuProcessing = std::chrono::steady_clock::now(); hipLaunchKernelGGL(( mixDynamicPartLabel), dim3(grid), dim3(block), 0, streamDynamic, i.partActiveNodeNums, i.partStartIndex, overloadNodeListD, isActiveD1, isActiveD2); uint itr = 0; bool isFinishedHost = true; do { itr++; isFinishedHost = true; hipMemcpy(isFinishedDevice, &isFinishedHost, sizeof(bool), hipMemcpyHostToDevice); hipLaunchKernelGGL(( cc_kernelDynamicSwap2Label), dim3(grid), dim3(block), 0, streamDynamic, i.partStartIndex, i.partActiveNodeNums, overloadNodeListD, degreeD, valueD, itr % 2 == 1 ? isActiveD1 : isActiveD2, itr % 2 == 1 ? 
isActiveD2 : isActiveD1, overloadEdgeListD, activeOverloadNodePointersD, isFinishedDevice); hipDeviceSynchronize(); hipMemcpy(&isFinishedHost, isFinishedDevice, sizeof(bool), hipMemcpyDeviceToHost); isFinishedHost = true; } while (!isFinishedHost); endOverloadGpuProcessing = std::chrono::steady_clock::now(); durationOverloadGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>( endOverloadGpuProcessing - startOverloadGpuProcessing).count(); /*cout << "iter " << iter << " part " << i << " durationOverloadGpuProcessing " << durationOverloadGpuProcessing << endl;*/ } gpuErrorcheck(hipPeekAtLastError()) } else { if (staticCCKernel.joinable()) { staticCCKernel.join(); } endGpuProcessing = std::chrono::steady_clock::now(); durationGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>( endGpuProcessing - startGpuProcessing).count(); } hipLaunchKernelGGL(( mixCommonLabel), dim3(grid), dim3(block), 0, streamDynamic, testNumNodes, isActiveD1, isActiveD2); //hipDeviceSynchronize(); //cout << "mixDynamicPartLabel" << " =========hipDeviceSynchronize()==========" << endl; //hipMemcpy(label, isActiveD, testNumNodes * sizeof(uint), hipMemcpyDeviceToHost); startPreGpuProcessing = std::chrono::steady_clock::now(); activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes); nodeSum += activeNodesNum; endPreGpuProcessing = std::chrono::steady_clock::now(); durationPreGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>( endPreGpuProcessing - startPreGpuProcessing).count(); } hipDeviceSynchronize(); hipMemcpy(value, valueD, testNumNodes * sizeof(uint), hipMemcpyDeviceToHost); transferSum += max_partition_size; cout << "transferSum: " << transferSum * 4 << "byte" << endl; cout << "iterationSum " << iter << endl; double edgeIterationAvg = (double) overloadEdgeSum / (double) testNumEdge / iter; double edgeIterationMaxAvg = (double) edgeIterationMax / (double) testNumEdge; cout << "edgeIterationAvg " << edgeIterationAvg << " edgeIterationMaxAvg " << edgeIterationMaxAvg << endl; cout << "nodeSum: " << nodeSum << endl; auto endRead = std::chrono::steady_clock::now(); durationRead = std::chrono::duration_cast<std::chrono::milliseconds>(endRead - startProcessing).count(); cout << "finish time : " << durationRead << " ms" << endl; cout << "total time : " << durationRead + testDuration << " ms" << endl; cout << "cpu time : " << durationReadCpu << " ms" << endl; cout << "pre fact processing time : " << durationGpuProcessing << " ms" << endl; cout << "overload fact processing time : " << durationOverloadGpuProcessing << " ms" << endl; cout << "durationMemoryTraverse : " << durationMemoryTraverse << " ms" << endl; cout << "durationOverloadGpuProcessing : " << durationOverloadGpuProcessing << " ms" << endl; cout << "gpu pre processing time : " << durationPreGpuProcessing << " ms" << endl; cout << "swap processing time : " << durationSwap << " ms" << endl; cout << "overloadEdgeSum : " << overloadEdgeSum << " " << endl; cout << "swapValidNodeSum " << swapValidNodeSum << " swapValidEdgeSum " << swapValidEdgeSum << endl; cout << "swapNotValidNodeSum " << swapNotValidNodeSum << " swapNotValidEdgeSum " << swapNotValidEdgeSum << " visitSum " << visitEdgeSum << " swapInEdgeSum " << swapInEdgeSum << endl; cout << "headSum " << headSum << " tailSum " << tailSum << endl; TIME += durationRead; } cout << "TIME " << (float) TIME / (float) testTimes << endl; /*hipFree(nodePointerD); hipFree(staticEdgeListD); hipFree(degreeD); hipFree(isActiveD1); hipFree(isActiveD2); 
    hipFree(valueD);
    hipFree(activeNodeListD);
    hipFree(activeNodeLabelingPrefixD);
    hipFree(activeOverloadNodePointersD);
    hipFree(activeOverloadDegreeD);
    hipFree(isInStaticD);
    hipFree(staticNodePointerD);
    hipFree(overloadNodeListD);
    delete[] label;
    delete[] degree;
    delete[] value;
    delete[] activeNodeList;
    delete[] activeOverloadNodePointers;
    delete[] isInStatic;
    delete[] overloadNodeList;
    delete[] staticNodePointer;
    delete[] fragmentData;
    return durationRead;*/
}
c92900aff38270df45190bb996abd7194135c90a.cu
// // Created by gxl on 2021/1/5. // #include "cc.cuh" void conventionParticipateCC(string ccPath) { cout << "===============conventionParticipateCC==============" << endl; uint testNumNodes = 0; ulong testNumEdge = 0; unsigned long transferSum = 0; uint *nodePointersI; uint *edgeList; auto startReadGraph = std::chrono::steady_clock::now(); ifstream infile(ccPath, ios::in | ios::binary); infile.read((char *) &testNumNodes, sizeof(uint)); uint numEdge = 0; infile.read((char *) &numEdge, sizeof(uint)); testNumEdge = numEdge; cout << "vertex num: " << testNumNodes << " edge num: " << testNumEdge << endl; nodePointersI = new uint[testNumNodes]; infile.read((char *) nodePointersI, sizeof(uint) * testNumNodes); edgeList = new uint[testNumEdge]; infile.read((char *) edgeList, sizeof(uint) * testNumEdge); infile.close(); unsigned long max_partition_size; unsigned long total_gpu_size; getMaxPartitionSize(max_partition_size, total_gpu_size, testNumNodes, 0.9, sizeof(uint), 5); uint partitionNum; if (testNumEdge > max_partition_size) { partitionNum = testNumEdge / max_partition_size + 1; } else { partitionNum = 1; } uint *degree = new uint[testNumNodes]; uint *value = new uint[testNumNodes]; bool *isActiveNodeList = new bool[testNumNodes]; CommonPartitionInfo *partitionInfoList = new CommonPartitionInfo[partitionNum]; bool *needTransferPartition = new bool[partitionNum]; for (uint i = 0; i < testNumNodes; i++) { isActiveNodeList[i] = true; value[i] = i; if (i + 1 < testNumNodes) { degree[i] = nodePointersI[i + 1] - nodePointersI[i]; } else { degree[i] = testNumEdge - nodePointersI[i]; } if (degree[i] > max_partition_size) { cout << "node " << i << " degree > maxPartition " << endl; return; } } for (uint i = 0; i < partitionNum; i++) { partitionInfoList[i].startVertex = -1; partitionInfoList[i].endVertex = -1; partitionInfoList[i].nodePointerOffset = -1; partitionInfoList[i].partitionEdgeSize = -1; } int tempPartitionIndex = 0; uint tempNodeIndex = 0; while (tempNodeIndex < testNumNodes) { if (partitionInfoList[tempPartitionIndex].startVertex == -1) { partitionInfoList[tempPartitionIndex].startVertex = tempNodeIndex; partitionInfoList[tempPartitionIndex].endVertex = tempNodeIndex; partitionInfoList[tempPartitionIndex].nodePointerOffset = nodePointersI[tempNodeIndex]; partitionInfoList[tempPartitionIndex].partitionEdgeSize = degree[tempNodeIndex]; tempNodeIndex++; } else { if (partitionInfoList[tempPartitionIndex].partitionEdgeSize + degree[tempNodeIndex] > max_partition_size) { tempPartitionIndex++; } else { partitionInfoList[tempPartitionIndex].endVertex = tempNodeIndex; partitionInfoList[tempPartitionIndex].partitionEdgeSize += degree[tempNodeIndex]; tempNodeIndex++; } } } uint *degreeD; bool *isActiveNodeListD; bool *nextActiveNodeListD; uint *nodePointerListD; uint *partitionEdgeListD; uint *valueD; cudaMalloc(&degreeD, testNumNodes * sizeof(uint)); cudaMalloc(&valueD, testNumNodes * sizeof(uint)); cudaMalloc(&isActiveNodeListD, testNumNodes * sizeof(bool)); cudaMalloc(&nextActiveNodeListD, testNumNodes * sizeof(bool)); cudaMalloc(&nodePointerListD, testNumNodes * sizeof(uint)); cudaMalloc(&partitionEdgeListD, max_partition_size * sizeof(uint)); cudaMemcpy(degreeD, degree, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice); cudaMemcpy(nodePointerListD, nodePointersI, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice); cudaMemset(nextActiveNodeListD, 0, testNumNodes * sizeof(bool)); //cacaulate the active node And make active node array dim3 grid = dim3(56, 1, 1); dim3 block = dim3(1024, 1, 1); 
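// Per-iteration flow of the loop below: (1) scan isActiveNodeList on the host to find which
// edge partitions contain active vertices, (2) copy only those partitions into
// partitionEdgeListD, (3) run ccKernel_CommonPartition on each, (4) copy nextActiveNodeListD
// back to the host and repeat. The kernel itself is declared in cc.cuh; with value[] initialised
// to the vertex id, the per-edge update is presumably the usual min-label propagation, roughly:
//     if (value[u] < value[v]) { value[v] = value[u]; nextActive[v] = true; }
// (sketch only -- the actual kernel body is not shown in this file).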
int testTimes = 1; long timeSum = 0; for (int i = 0; i < testTimes; i++) { for (int j = 0; j < testNumNodes; j++) { isActiveNodeList[j] = true; value[j] = j; } cudaMemcpy(valueD, value, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice); uint activeSum = 0; int iteration = 0; auto startProcessing = std::chrono::steady_clock::now(); while (true) { uint activeNodeNum = 0; checkNeedTransferPartitionOpt(needTransferPartition, partitionInfoList, isActiveNodeList, partitionNum, testNumNodes, activeNodeNum); if (activeNodeNum <= 0) { break; } else { cout << "iteration " << iteration << " activeNodes " << activeNodeNum << endl; activeSum += activeNodeNum; } cudaMemcpy(isActiveNodeListD, isActiveNodeList, testNumNodes * sizeof(bool), cudaMemcpyHostToDevice); for (int j = 0; j < partitionNum; j++) { if (needTransferPartition[j]) { cudaMemcpy(partitionEdgeListD, edgeList + partitionInfoList[j].nodePointerOffset, partitionInfoList[j].partitionEdgeSize * sizeof(uint), cudaMemcpyHostToDevice); transferSum += partitionInfoList[j].partitionEdgeSize; ccKernel_CommonPartition<<<grid, block>>>(partitionInfoList[j].startVertex, partitionInfoList[j].endVertex, partitionInfoList[j].nodePointerOffset, isActiveNodeListD, nodePointerListD, partitionEdgeListD, degreeD, valueD, nextActiveNodeListD); cudaDeviceSynchronize(); gpuErrorcheck(cudaPeekAtLastError()) } } cudaMemcpy(isActiveNodeList, nextActiveNodeListD, testNumNodes * sizeof(bool), cudaMemcpyDeviceToHost); cudaMemset(nextActiveNodeListD, 0, testNumNodes * sizeof(bool)); iteration++; } cout << "cpu transfer to gpu " << transferSum * sizeof(uint) << "byte" << endl; cout << " activeSum " << activeSum << endl; auto endRead = std::chrono::steady_clock::now(); long durationRead = std::chrono::duration_cast<std::chrono::milliseconds>(endRead - startProcessing).count(); cout << " finish time : " << durationRead << " ms" << endl; } free(nodePointersI); free(edgeList); free(degree); free(isActiveNodeList); cudaFree(isActiveNodeListD); cudaFree(nextActiveNodeListD); cudaFree(nodePointerListD); cudaFree(partitionEdgeListD); //todo free partitionInfoList needTransferPartition } int needCpu = 0; int notNeedCpu = 0; long processingTimeSum = 0; long cpuTimeSum = 0; long allTimeSum = 0; long validSwapSum = 0; int trestSum = 0; void ccShare(string ccPath) { uint testNumNodes = 0; ulong testNumEdge = 0; uint *nodePointersI; uint *edgeList; auto startReadGraph = std::chrono::steady_clock::now(); ifstream infile(ccPath, ios::in | ios::binary); infile.read((char *) &testNumNodes, sizeof(uint)); uint numEdge = 0; infile.read((char *) &numEdge, sizeof(uint)); testNumEdge = numEdge; cout << "vertex num: " << testNumNodes << " edge num: " << testNumEdge << endl; gpuErrorcheck(cudaMallocManaged(&nodePointersI, (testNumNodes + 1) * sizeof(uint))); infile.read((char *) nodePointersI, sizeof(uint) * testNumNodes); gpuErrorcheck(cudaMallocManaged(&edgeList, (numEdge) * sizeof(uint))); cudaMemAdvise(nodePointersI, (testNumNodes + 1) * sizeof(uint), cudaMemAdviseSetReadMostly, 0); cudaMemAdvise(edgeList, (numEdge) * sizeof(uint), cudaMemAdviseSetReadMostly, 0); infile.read((char *) edgeList, sizeof(uint) * testNumEdge); infile.close(); //preprocessData(nodePointersI, edgeList, testNumNodes, testNumEdge); auto endReadGraph = std::chrono::steady_clock::now(); long durationReadGraph = std::chrono::duration_cast<std::chrono::milliseconds>( endReadGraph - startReadGraph).count(); cout << "read graph time : " << durationReadGraph << "ms" << endl; int testTimes = 1; long timeSum = 0; for (int 
i = 0; i < testTimes; i++) { timeSum += ccCaculateInShare(testNumNodes, testNumEdge, nodePointersI, edgeList); //timeSum += bfsCaculateInShare(testNumNodes, testNumEdge, nodePointersI, edgeList, 53037907); break; } cout << "need cpu " << needCpu << " not need cpu " << notNeedCpu << endl; cout << "processingTime " << processingTimeSum / testTimes << " cpu time " << cpuTimeSum / testTimes << " all Time " << allTimeSum / testTimes << endl; cout << "mean time is " << timeSum / testTimes << endl; cout << "mean validSwapSum is " << validSwapSum / testTimes << endl; cout << trestSum << endl; } long ccCaculateInShare(uint testNumNodes, uint testNumEdge, uint *nodePointersI, uint *edgeList) { auto start = std::chrono::steady_clock::now(); uint *degree; uint *value; //uint *recordActiveNodes = new uint[testNumNodes]; gpuErrorcheck(cudaMallocManaged(&degree, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMallocManaged(&value, testNumNodes * sizeof(uint))); auto startPreCaculate = std::chrono::steady_clock::now(); for (uint i = 0; i < testNumNodes - 1; i++) { degree[i] = nodePointersI[i + 1] - nodePointersI[i]; } degree[testNumNodes - 1] = testNumEdge - nodePointersI[testNumNodes - 1]; bool *label; gpuErrorcheck(cudaMallocManaged(&label, testNumNodes * sizeof(bool))); for (uint i = 0; i < testNumNodes; i++) { label[i] = true; value[i] = i; } uint *activeNodeList; cudaMallocManaged(&activeNodeList, testNumNodes * sizeof(uint)); //cacaulate the active node And make active node array uint *activeNodeLabelingD; gpuErrorcheck(cudaMallocManaged(&activeNodeLabelingD, testNumNodes * sizeof(unsigned int))); uint *activeNodeLabelingPrefixD; gpuErrorcheck(cudaMallocManaged(&activeNodeLabelingPrefixD, testNumNodes * sizeof(unsigned int))); dim3 grid = dim3(56, 1, 1); dim3 block = dim3(1024, 1, 1); setLabeling<<<grid, block>>>(testNumNodes, label, activeNodeLabelingD); thrust::device_ptr<unsigned int> ptr_labeling(activeNodeLabelingD); thrust::device_ptr<unsigned int> ptr_labeling_prefixsum(activeNodeLabelingPrefixD); uint activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes); int iter = 0; uint nodeSum = activeNodesNum; auto endPreCaculate = std::chrono::steady_clock::now(); long durationPreCaculate = std::chrono::duration_cast<std::chrono::milliseconds>( endPreCaculate - startPreCaculate).count(); cout << "durationPreCaculate time : " << durationPreCaculate << " ms" << endl; auto startProcessing = std::chrono::steady_clock::now(); //vector<vector<uint>> visitRecordByIteration; while (activeNodesNum > 0) { iter++; thrust::exclusive_scan(ptr_labeling, ptr_labeling + testNumNodes, ptr_labeling_prefixsum); setActiveNodeArray<<<grid, block>>>(testNumNodes, activeNodeList, label, activeNodeLabelingPrefixD); setLabelDefault<<<grid, block>>>(activeNodesNum, activeNodeList, label); cc_kernel<<<grid, block>>>(activeNodesNum, activeNodeList, nodePointersI, degree, edgeList, value, label); cudaDeviceSynchronize(); //visitRecordByIteration.push_back(countDataByIteration(testNumEdge, testNumNodes, nodePointersI, degree, activeNodeLabelingD)); gpuErrorcheck(cudaPeekAtLastError()); setLabeling<<<grid, block>>>(testNumNodes, label, activeNodeLabelingD); activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes); nodeSum += activeNodesNum; cout << "iter: " << iter << " activeNodes: " << activeNodesNum << endl; } cudaDeviceSynchronize(); //writeTrunkVistInIteration(visitRecordByIteration, "./CountByIterationCC.txt"); cout << "nodeSum: " << nodeSum << endl; auto endRead = 
std::chrono::steady_clock::now(); long durationRead = std::chrono::duration_cast<std::chrono::milliseconds>(endRead - startProcessing).count(); cout << "iter sum is " << iter << " finish time : " << durationRead << " ms" << endl; cudaFree(degree); cudaFree(label); cudaFree(value); cudaFree(activeNodeList); cudaFree(activeNodeLabelingD); cudaFree(activeNodeLabelingPrefixD); return durationRead; } void ccKernelThread(uint staticNodeNum, uint *activeNodeListD, uint *staticNodePointerD, uint *degreeD, uint *staticEdgeListD, uint *valueD, uint *isActiveD1, uint *isActiveD2, bool *isFinishedManaged, dim3 grid, dim3 block, cudaStream_t steamStatic) { uint itr = 0; bool isFinishedHost = true; do { itr++; isFinishedHost = true; cudaMemcpy(isFinishedManaged, &isFinishedHost, sizeof(bool), cudaMemcpyHostToDevice); cc_kernelStaticSwapOpt2Label<<<grid, block, 0, steamStatic>>>(staticNodeNum, activeNodeListD, staticNodePointerD, degreeD, staticEdgeListD, valueD, itr % 2 == 1 ? isActiveD1 : isActiveD2, itr % 2 == 1 ? isActiveD2 : isActiveD1, isFinishedManaged); cudaDeviceSynchronize(); cudaMemcpy(&isFinishedHost, isFinishedManaged, sizeof(bool), cudaMemcpyDeviceToHost); isFinishedHost = true; } while (!isFinishedHost); } void ccOpt(string ccPath, float adviseK) { uint testNumNodes = 0; ulong testNumEdge = 0; uint *nodePointersI; uint *edgeList; bool isUseShare = true; auto startReadGraph = std::chrono::steady_clock::now(); ifstream infile(ccPath, ios::in | ios::binary); infile.read((char *) &testNumNodes, sizeof(uint)); uint numEdge = 0; infile.read((char *) &numEdge, sizeof(uint)); testNumEdge = numEdge; cout << "vertex num: " << testNumNodes << " edge num: " << testNumEdge << endl; nodePointersI = new uint[testNumNodes + 1]; infile.read((char *) nodePointersI, sizeof(uint) * testNumNodes); edgeList = new uint[testNumEdge + 1]; infile.read((char *) edgeList, sizeof(uint) * testNumEdge); infile.close(); auto endReadGraph = std::chrono::steady_clock::now(); long durationReadGraph = std::chrono::duration_cast<std::chrono::milliseconds>( endReadGraph - startReadGraph).count(); cout << "read graph time : " << durationReadGraph << "ms" << endl; int testTimes = 1; long timeSum = 0; for (int i = 0; i < testTimes; i++) { //timeSum += ccCaculateCommonMemoryInnerAsync(testNumNodes, testNumEdge, nodePointersI, edgeList, adviseK); //break; timeSum += ccCaculateCommonMemoryInnerAsyncRandom(testNumNodes, testNumEdge, nodePointersI, edgeList, adviseK); cout << i << "========================================" << endl; } } struct TempConnectedComponent { uint index; uint nodeSum; uint edgeSum; }; long ccCaculateCommonMemoryInnerAsync(uint testNumNodes, uint testNumEdge, uint *nodePointersI, uint *edgeList, float adviseK) { cout << "=========ccCaculateCommonMemoryInnerAsync1========" << endl; ulong edgeIterationMax = 0; auto start = std::chrono::steady_clock::now(); auto startPreCaculate = std::chrono::steady_clock::now(); //CPU long durationRead; ulong transferSum = 0; unsigned long max_partition_size; unsigned long total_gpu_size; uint maxStaticNode = 0; uint *degree; uint *value; uint *label; bool *isInStatic; uint *overloadNodeList; uint *staticNodePointer; uint *activeNodeList; uint *activeOverloadNodePointers; vector<PartEdgeListInfo> partEdgeListInfoArr; /* * overloadEdgeList overload edge list in every iteration * */ uint *overloadEdgeList; FragmentData *fragmentData; bool isFromTail = true; //GPU uint *staticEdgeListD; uint *overloadEdgeListD; bool *isInStaticD; uint *overloadNodeListD; uint *staticNodePointerD; 
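// The two frontier labels declared a few lines below (isActiveD1 / isActiveD2) are
// double-buffered: within one outer iteration the static-region kernel and the overload
// kernels alternate between them (itr % 2 selects the input vs. the output buffer), and
// mixCommonLabel -- defined in cc.cuh -- appears to fold the buffers back together before
// the next frontier size is taken with thrust::reduce.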
uint *nodePointerD; uint *degreeD; // async need two labels uint *isActiveD1; uint *isActiveD2; uint *isStaticActive; uint *isOverloadActive; uint *valueD; uint *activeNodeListD; uint *activeNodeLabelingPrefixD; uint *activeOverloadNodePointersD; uint *activeOverloadDegreeD; bool *isFinishedDevice; degree = new uint[testNumNodes]; value = new uint[testNumNodes]; label = new uint[testNumNodes]; isInStatic = new bool[testNumNodes]; overloadNodeList = new uint[testNumNodes]; staticNodePointer = new uint[testNumNodes]; activeNodeList = new uint[testNumNodes]; activeOverloadNodePointers = new uint[testNumNodes]; getMaxPartitionSize(max_partition_size, total_gpu_size, testNumNodes, adviseK, sizeof(uint), testNumEdge, 15); //caculate degree uint meanDegree = testNumEdge / testNumNodes; cout << " meanDegree " << meanDegree << endl; uint degree0Sum = 0; for (uint i = 0; i < testNumNodes - 1; i++) { if (nodePointersI[i] > testNumEdge) { cout << i << " " << nodePointersI[i] << endl; break; } degree[i] = nodePointersI[i + 1] - nodePointersI[i]; } degree[testNumNodes - 1] = testNumEdge - nodePointersI[testNumNodes - 1]; memcpy(staticNodePointer, nodePointersI, testNumNodes * sizeof(uint)); //caculate static staticEdgeListD gpuErrorcheck(cudaMalloc(&isFinishedDevice, 1 * sizeof(bool))); gpuErrorcheck(cudaMalloc(&staticEdgeListD, max_partition_size * sizeof(uint))); auto startmove = std::chrono::steady_clock::now(); gpuErrorcheck(cudaMemcpy(staticEdgeListD, edgeList, max_partition_size * sizeof(uint), cudaMemcpyHostToDevice)); auto endMove = std::chrono::steady_clock::now(); long testDuration = std::chrono::duration_cast<std::chrono::milliseconds>( endMove - startmove).count(); gpuErrorcheck(cudaMalloc(&isInStaticD, testNumNodes * sizeof(bool))) gpuErrorcheck(cudaMalloc(&overloadNodeListD, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMalloc(&staticNodePointerD, testNumNodes * sizeof(uint))) gpuErrorcheck(cudaMemcpy(staticNodePointerD, nodePointersI, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice)); gpuErrorcheck(cudaMalloc(&nodePointerD, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMemcpy(nodePointerD, nodePointersI, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice)); for (uint i = 0; i < testNumNodes; i++) { label[i] = 1; value[i] = i; if (nodePointersI[i] < max_partition_size && (nodePointersI[i] + degree[i] - 1) < max_partition_size) { isInStatic[i] = true; if (i > maxStaticNode) maxStaticNode = i; } else { isInStatic[i] = false; } } cudaMemcpy(isInStaticD, isInStatic, testNumNodes * sizeof(bool), cudaMemcpyHostToDevice); cout << "max_partition_size: " << max_partition_size << " maxStaticNode: " << maxStaticNode << endl; uint partOverloadSize = total_gpu_size - max_partition_size; uint overloadSize = testNumEdge - nodePointersI[maxStaticNode + 1]; cout << " partOverloadSize " << partOverloadSize << " overloadSize " << overloadSize << endl; overloadEdgeList = (uint *) malloc(overloadSize * sizeof(uint)); gpuErrorcheck(cudaMalloc(&overloadEdgeListD, partOverloadSize * sizeof(uint))); gpuErrorcheck(cudaMalloc(&degreeD, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMalloc(&isActiveD1, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMalloc(&isActiveD2, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMalloc(&isStaticActive, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMalloc(&isOverloadActive, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMalloc(&valueD, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMalloc(&activeNodeLabelingPrefixD, testNumNodes * sizeof(unsigned int))); 
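// activeNodeLabelingPrefixD holds an exclusive prefix sum over the 0/1 activity labels;
// it assigns each active vertex its slot in the compacted id array, i.e. after
//     thrust::exclusive_scan(ptr_labeling_static, ptr_labeling_static + testNumNodes,
//                            ptr_labeling_prefixsum);
// an active vertex v is written to activeNodeListD[prefix[v]] by setStaticActiveNodeArray.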
gpuErrorcheck(cudaMalloc(&activeNodeListD, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMalloc(&activeOverloadNodePointersD, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMalloc(&activeOverloadDegreeD, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMemcpy(degreeD, degree, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice)); gpuErrorcheck(cudaMemcpy(valueD, value, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice)); gpuErrorcheck(cudaMemcpy(isActiveD1, label, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice)); gpuErrorcheck(cudaMemset(isActiveD2, 0, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMemset(isStaticActive, 0, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMemset(isOverloadActive, 0, testNumNodes * sizeof(uint))); //cacaulate the active node And make active node array dim3 grid = dim3(56, 1, 1); dim3 block = dim3(1024, 1, 1); //setLabeling<<<grid, block>>>(testNumNodes, labelD, activeNodeLabelingD); thrust::device_ptr<unsigned int> ptr_labeling(isActiveD1); thrust::device_ptr<unsigned int> ptr_labelingTest(isActiveD2); thrust::device_ptr<unsigned int> ptr_labeling_static(isStaticActive); thrust::device_ptr<unsigned int> ptr_labeling_overload(isOverloadActive); thrust::device_ptr<unsigned int> ptr_labeling_prefixsum(activeNodeLabelingPrefixD); thrust::device_ptr<unsigned int> ptrOverloadDegree(activeOverloadDegreeD); thrust::device_ptr<unsigned int> ptrOverloadPrefixsum(activeOverloadNodePointersD); uint activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes); int iter = 0; uint nodeSum = activeNodesNum; ulong overloadEdgeSum = 0; auto startCpu = std::chrono::steady_clock::now(); auto endReadCpu = std::chrono::steady_clock::now(); long durationReadCpu = 0; auto startSwap = std::chrono::steady_clock::now(); auto endSwap = std::chrono::steady_clock::now(); long durationSwap = 0; auto startGpuProcessing = std::chrono::steady_clock::now(); auto endGpuProcessing = std::chrono::steady_clock::now(); long durationGpuProcessing = 0; auto startOverloadGpuProcessing = std::chrono::steady_clock::now(); auto endOverloadGpuProcessing = std::chrono::steady_clock::now(); long durationOverloadGpuProcessing = 0; auto startPreGpuProcessing = std::chrono::steady_clock::now(); auto endPreGpuProcessing = std::chrono::steady_clock::now(); long durationPreGpuProcessing = 0; auto endPreCaculate = std::chrono::steady_clock::now(); long durationPreCaculate = std::chrono::duration_cast<std::chrono::milliseconds>( endPreCaculate - startPreCaculate).count(); cout << "durationPreCaculate time : " << durationPreCaculate << " ms" << endl; cudaStream_t steamStatic, streamDynamic; cudaStreamCreate(&steamStatic); cudaStreamCreate(&streamDynamic); auto startMemoryTraverse = std::chrono::steady_clock::now(); auto endMemoryTraverse = std::chrono::steady_clock::now(); long durationMemoryTraverse = 0; auto startProcessing = std::chrono::steady_clock::now(); //uint cursorStartSwap = staticFragmentNum + 1; uint swapValidNodeSum = 0; uint swapValidEdgeSum = 0; uint swapNotValidNodeSum = 0; uint swapNotValidEdgeSum = 0; uint visitEdgeSum = 0; uint swapInEdgeSum = 0; uint headSum; uint tailSum; while (activeNodesNum > 0) { iter++; //cout << "iter " << iter << " activeNodesNum " << activeNodesNum << endl; startPreGpuProcessing = std::chrono::steady_clock::now(); //cleanStaticAndOverloadLabel<<<grid, block>>>(testNumNodes, isStaticActive, isOverloadActive); setStaticAndOverloadLabel<<<grid, block>>>(testNumNodes, isActiveD1, isStaticActive, isOverloadActive, isInStaticD); uint 
staticNodeNum = thrust::reduce(ptr_labeling_static, ptr_labeling_static + testNumNodes); if (staticNodeNum > 0) { //cout << "iter " << iter << " staticNodeNum " << staticNodeNum << endl; thrust::exclusive_scan(ptr_labeling_static, ptr_labeling_static + testNumNodes, ptr_labeling_prefixsum); setStaticActiveNodeArray<<<grid, block>>>(testNumNodes, activeNodeListD, isStaticActive, activeNodeLabelingPrefixD); } uint overloadNodeNum = thrust::reduce(ptr_labeling_overload, ptr_labeling_overload + testNumNodes); uint overloadEdgeNum = 0; if (overloadNodeNum > 0) { //cout << "iter " << iter << " overloadNodeNum " << overloadNodeNum << endl; thrust::exclusive_scan(ptr_labeling_overload, ptr_labeling_overload + testNumNodes, ptr_labeling_prefixsum); setOverloadNodePointerSwap<<<grid, block>>>(testNumNodes, overloadNodeListD, activeOverloadDegreeD, isOverloadActive, activeNodeLabelingPrefixD, degreeD); thrust::exclusive_scan(ptrOverloadDegree, ptrOverloadDegree + overloadNodeNum, activeOverloadNodePointersD); overloadEdgeNum = thrust::reduce(thrust::device, ptrOverloadDegree, ptrOverloadDegree + overloadNodeNum, 0); //cout << "iter " << iter << " overloadEdgeNum " << overloadEdgeNum << endl; overloadEdgeSum += overloadEdgeNum; if (overloadEdgeNum > edgeIterationMax) { edgeIterationMax = overloadEdgeNum; } } endPreGpuProcessing = std::chrono::steady_clock::now(); durationPreGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>( endPreGpuProcessing - startPreGpuProcessing).count(); startGpuProcessing = std::chrono::steady_clock::now(); mixDynamicPartLabel<<<grid, block, 0, steamStatic>>>(staticNodeNum, 0, activeNodeListD, isActiveD1, isActiveD2); thread staticCCKernel = thread(ccKernelThread, staticNodeNum, activeNodeListD, staticNodePointerD, degreeD, staticEdgeListD, valueD, isActiveD1, isActiveD2, isFinishedDevice, grid, block, steamStatic); /*if (staticCCKernel.joinable()) { staticCCKernel.join(); }*/ if (overloadNodeNum > 0) { startCpu = std::chrono::steady_clock::now(); /*cudaMemcpyAsync(activeNodeList, activeNodeListD, activeNodesNum * sizeof(uint), cudaMemcpyDeviceToHost, streamDynamic);*/ cudaMemcpyAsync(overloadNodeList, overloadNodeListD, overloadNodeNum * sizeof(uint), cudaMemcpyDeviceToHost, streamDynamic); cudaMemcpyAsync(activeOverloadNodePointers, activeOverloadNodePointersD, overloadNodeNum * sizeof(uint), cudaMemcpyDeviceToHost, streamDynamic); int threadNum = 20; if (overloadNodeNum < 50) { threadNum = 1; } thread runThreads[threadNum]; for (int i = 0; i < threadNum; i++) { runThreads[i] = thread(fillDynamic, i, threadNum, 0, overloadNodeNum, degree, activeOverloadNodePointers, nodePointersI, overloadNodeList, overloadEdgeList, edgeList); } for (unsigned int t = 0; t < threadNum; t++) { runThreads[t].join(); } caculatePartInfoForEdgeList(activeOverloadNodePointers, overloadNodeList, degree, partEdgeListInfoArr, overloadNodeNum, partOverloadSize, overloadEdgeNum); endReadCpu = std::chrono::steady_clock::now(); durationReadCpu += std::chrono::duration_cast<std::chrono::milliseconds>(endReadCpu - startCpu).count(); if (staticCCKernel.joinable()) { staticCCKernel.join(); } endGpuProcessing = std::chrono::steady_clock::now(); durationGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>( endGpuProcessing - startGpuProcessing).count(); for (auto &i : partEdgeListInfoArr) { startMemoryTraverse = std::chrono::steady_clock::now(); gpuErrorcheck(cudaMemcpy(overloadEdgeListD, overloadEdgeList + activeOverloadNodePointers[i.partStartIndex], i.partEdgeNums * 
sizeof(uint), cudaMemcpyHostToDevice)) transferSum += i.partEdgeNums; endMemoryTraverse = std::chrono::steady_clock::now(); durationMemoryTraverse += std::chrono::duration_cast<std::chrono::milliseconds>( endMemoryTraverse - startMemoryTraverse).count(); /*cout << "iter " << iter << " part " << i << " durationMemoryTraverse " << durationMemoryTraverse << endl;*/ startOverloadGpuProcessing = std::chrono::steady_clock::now(); mixDynamicPartLabel<<<grid, block, 0, streamDynamic>>>(i.partActiveNodeNums, i.partStartIndex, overloadNodeListD, isActiveD1, isActiveD2); uint itr = 0; bool isFinishedHost = true; do { itr++; isFinishedHost = true; cudaMemcpy(isFinishedDevice, &isFinishedHost, sizeof(bool), cudaMemcpyHostToDevice); cc_kernelDynamicSwap2Label<<<grid, block, 0, streamDynamic>>>(i.partStartIndex, i.partActiveNodeNums, overloadNodeListD, degreeD, valueD, itr % 2 == 1 ? isActiveD1 : isActiveD2, itr % 2 == 1 ? isActiveD2 : isActiveD1, overloadEdgeListD, activeOverloadNodePointersD, isFinishedDevice); cudaDeviceSynchronize(); cudaMemcpy(&isFinishedHost, isFinishedDevice, sizeof(bool), cudaMemcpyDeviceToHost); isFinishedHost = true; } while (!isFinishedHost); endOverloadGpuProcessing = std::chrono::steady_clock::now(); durationOverloadGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>( endOverloadGpuProcessing - startOverloadGpuProcessing).count(); /*cout << "iter " << iter << " part " << i << " durationOverloadGpuProcessing " << durationOverloadGpuProcessing << endl;*/ } gpuErrorcheck(cudaPeekAtLastError()) } else { if (staticCCKernel.joinable()) { staticCCKernel.join(); } endGpuProcessing = std::chrono::steady_clock::now(); durationGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>( endGpuProcessing - startGpuProcessing).count(); } mixCommonLabel<<<grid, block, 0, streamDynamic>>>(testNumNodes, isActiveD1, isActiveD2); //cudaDeviceSynchronize(); //cout << "mixDynamicPartLabel" << " =========cudaDeviceSynchronize()==========" << endl; //cudaMemcpy(label, isActiveD, testNumNodes * sizeof(uint), cudaMemcpyDeviceToHost); startPreGpuProcessing = std::chrono::steady_clock::now(); activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes); nodeSum += activeNodesNum; endPreGpuProcessing = std::chrono::steady_clock::now(); durationPreGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>( endPreGpuProcessing - startPreGpuProcessing).count(); } cudaDeviceSynchronize(); cudaMemcpy(value, valueD, testNumNodes * sizeof(uint), cudaMemcpyDeviceToHost); transferSum += max_partition_size; cout << "transferSum: " << transferSum * 4 << "byte" << endl; cout << "iterationSum " << iter << endl; double edgeIterationAvg = (double) overloadEdgeSum / (double) testNumEdge / iter; double edgeIterationMaxAvg = (double) edgeIterationMax / (double) testNumEdge; cout << "edgeIterationAvg " << edgeIterationAvg << " edgeIterationMaxAvg " << edgeIterationMaxAvg << endl; cout << "nodeSum: " << nodeSum << endl; auto endRead = std::chrono::steady_clock::now(); durationRead = std::chrono::duration_cast<std::chrono::milliseconds>(endRead - startProcessing).count(); cout << "finish time : " << durationRead << " ms" << endl; cout << "total time : " << durationRead + testDuration << " ms" << endl; cout << "cpu time : " << durationReadCpu << " ms" << endl; cout << "pre fact processing time : " << durationGpuProcessing << " ms" << endl; cout << "overload fact processing time : " << durationOverloadGpuProcessing << " ms" << endl; cout << "durationMemoryTraverse : " 
<< durationMemoryTraverse << " ms" << endl; cout << "durationOverloadGpuProcessing : " << durationOverloadGpuProcessing << " ms" << endl; cout << "gpu pre processing time : " << durationPreGpuProcessing << " ms" << endl; cout << "swap processing time : " << durationSwap << " ms" << endl; cout << "overloadEdgeSum : " << overloadEdgeSum << " " << endl; cout << "swapValidNodeSum " << swapValidNodeSum << " swapValidEdgeSum " << swapValidEdgeSum << endl; cout << "swapNotValidNodeSum " << swapNotValidNodeSum << " swapNotValidEdgeSum " << swapNotValidEdgeSum << " visitSum " << visitEdgeSum << " swapInEdgeSum " << swapInEdgeSum << endl; cout << "headSum " << headSum << " tailSum " << tailSum << endl; /*cudaFree(nodePointerD); cudaFree(staticEdgeListD); cudaFree(degreeD); cudaFree(isActiveD1); cudaFree(isActiveD2); cudaFree(valueD); cudaFree(activeNodeListD); cudaFree(activeNodeLabelingPrefixD); cudaFree(activeOverloadNodePointersD); cudaFree(activeOverloadDegreeD); cudaFree(isInStaticD); cudaFree(staticNodePointerD); cudaFree(overloadNodeListD); delete[] label; delete[] degree; delete[] value; delete[] activeNodeList; delete[] activeOverloadNodePointers; delete[] isInStatic; delete[] overloadNodeList; delete[] staticNodePointer; delete[] fragmentData; return durationRead;*/ } void conventionParticipateCCInLong() { cout << "===============conventionParticipateCCInLong==============" << endl; uint testNumNodes = 0; ulong testNumEdge = 0; unsigned long transferSum = 0; uint *nodePointersI; uint *edgeList; auto startReadGraph = std::chrono::steady_clock::now(); ifstream infile(testGraphPath, ios::in | ios::binary); infile.read((char *) &testNumNodes, sizeof(uint)); uint numEdge = 0; infile.read((char *) &numEdge, sizeof(uint)); testNumEdge = numEdge; cout << "vertex num: " << testNumNodes << " edge num: " << testNumEdge << endl; nodePointersI = new uint[testNumNodes]; infile.read((char *) nodePointersI, sizeof(uint) * testNumNodes); edgeList = new uint[testNumEdge]; infile.read((char *) edgeList, sizeof(uint) * testNumEdge); infile.close(); unsigned long max_partition_size; unsigned long total_gpu_size; getMaxPartitionSize(max_partition_size, total_gpu_size, testNumNodes, 0.9, sizeof(uint), 5); uint partitionNum; if (testNumEdge > max_partition_size) { partitionNum = testNumEdge / max_partition_size + 1; } else { partitionNum = 1; } uint *degree = new uint[testNumNodes]; uint *value = new uint[testNumNodes]; bool *isActiveNodeList = new bool[testNumNodes]; CommonPartitionInfo *partitionInfoList = new CommonPartitionInfo[partitionNum]; bool *needTransferPartition = new bool[partitionNum]; for (uint i = 0; i < testNumNodes; i++) { isActiveNodeList[i] = true; value[i] = i; if (i + 1 < testNumNodes) { degree[i] = nodePointersI[i + 1] - nodePointersI[i]; } else { degree[i] = testNumEdge - nodePointersI[i]; } if (degree[i] > max_partition_size) { cout << "node " << i << " degree > maxPartition " << endl; return; } } for (uint i = 0; i < partitionNum; i++) { partitionInfoList[i].startVertex = -1; partitionInfoList[i].endVertex = -1; partitionInfoList[i].nodePointerOffset = -1; partitionInfoList[i].partitionEdgeSize = -1; } int tempPartitionIndex = 0; uint tempNodeIndex = 0; while (tempNodeIndex < testNumNodes) { if (partitionInfoList[tempPartitionIndex].startVertex == -1) { partitionInfoList[tempPartitionIndex].startVertex = tempNodeIndex; partitionInfoList[tempPartitionIndex].endVertex = tempNodeIndex; partitionInfoList[tempPartitionIndex].nodePointerOffset = nodePointersI[tempNodeIndex]; 
partitionInfoList[tempPartitionIndex].partitionEdgeSize = degree[tempNodeIndex]; tempNodeIndex++; } else { if (partitionInfoList[tempPartitionIndex].partitionEdgeSize + degree[tempNodeIndex] > max_partition_size) { tempPartitionIndex++; } else { partitionInfoList[tempPartitionIndex].endVertex = tempNodeIndex; partitionInfoList[tempPartitionIndex].partitionEdgeSize += degree[tempNodeIndex]; tempNodeIndex++; } } } uint *degreeD; bool *isActiveNodeListD; bool *nextActiveNodeListD; uint *nodePointerListD; uint *partitionEdgeListD; uint *valueD; cudaMalloc(&degreeD, testNumNodes * sizeof(uint)); cudaMalloc(&valueD, testNumNodes * sizeof(uint)); cudaMalloc(&isActiveNodeListD, testNumNodes * sizeof(bool)); cudaMalloc(&nextActiveNodeListD, testNumNodes * sizeof(bool)); cudaMalloc(&nodePointerListD, testNumNodes * sizeof(uint)); cudaMalloc(&partitionEdgeListD, max_partition_size * sizeof(uint)); cudaMemcpy(degreeD, degree, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice); cudaMemcpy(nodePointerListD, nodePointersI, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice); cudaMemset(nextActiveNodeListD, 0, testNumNodes * sizeof(bool)); //cacaulate the active node And make active node array dim3 grid = dim3(56, 1, 1); dim3 block = dim3(1024, 1, 1); int testTimes = 1; long timeSum = 0; for (int i = 0; i < testTimes; i++) { for (int j = 0; j < testNumNodes; j++) { isActiveNodeList[j] = true; value[j] = j; } cudaMemcpy(valueD, value, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice); uint activeSum = 0; int iteration = 0; auto startProcessing = std::chrono::steady_clock::now(); while (true) { uint activeNodeNum = 0; checkNeedTransferPartition(needTransferPartition, partitionInfoList, isActiveNodeList, partitionNum, testNumNodes, activeNodeNum); if (activeNodeNum <= 0) { break; } else { cout << "iteration " << iteration << " activeNodes " << activeNodeNum << endl; activeSum += activeNodeNum; } cudaMemcpy(isActiveNodeListD, isActiveNodeList, testNumNodes * sizeof(bool), cudaMemcpyHostToDevice); for (int j = 0; j < partitionNum; j++) { if (needTransferPartition[j]) { cudaMemcpy(partitionEdgeListD, edgeList + partitionInfoList[j].nodePointerOffset, partitionInfoList[j].partitionEdgeSize * sizeof(uint), cudaMemcpyHostToDevice); transferSum += partitionInfoList[j].partitionEdgeSize; ccKernel_CommonPartition<<<grid, block>>>(partitionInfoList[j].startVertex, partitionInfoList[j].endVertex, partitionInfoList[j].nodePointerOffset, isActiveNodeListD, nodePointerListD, partitionEdgeListD, degreeD, valueD, nextActiveNodeListD); cudaDeviceSynchronize(); gpuErrorcheck(cudaPeekAtLastError()) } } cudaMemcpy(isActiveNodeList, nextActiveNodeListD, testNumNodes * sizeof(bool), cudaMemcpyDeviceToHost); cudaMemset(nextActiveNodeListD, 0, testNumNodes * sizeof(bool)); iteration++; } cout << "cpu transfer to gpu " << transferSum * sizeof(uint) << "byte" << endl; cout << " activeSum " << activeSum << endl; auto endRead = std::chrono::steady_clock::now(); long durationRead = std::chrono::duration_cast<std::chrono::milliseconds>(endRead - startProcessing).count(); cout << " finish time : " << durationRead << " ms" << endl; } free(nodePointersI); free(edgeList); free(degree); free(isActiveNodeList); cudaFree(isActiveNodeListD); cudaFree(nextActiveNodeListD); cudaFree(nodePointerListD); cudaFree(partitionEdgeListD); } long ccCaculateCommonMemoryInnerAsyncRecordVisit(uint testNumNodes, uint testNumEdge, uint *nodePointersI, uint *edgeList, float adviseK) { cout << "=========ccCaculateCommonMemoryInnerAsync1========" << endl; 
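// Same asynchronous static/overload scheme as ccCaculateCommonMemoryInnerAsync above, but
// with an extra per-vertex counter (vertexVisitRecord / vertexVisitRecordD, allocated just
// below) so that edge-list accesses can be totalled per vertex and, after the run, binned
// into fixed-size edge partitions (partVistRecordList) for a per-region visit report, as in
// the HIP counterpart of this routine.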
ulong edgeIterationMax = 0; auto start = std::chrono::steady_clock::now(); auto startPreCaculate = std::chrono::steady_clock::now(); //CPU long durationRead; ulong transferSum = 0; unsigned long max_partition_size; unsigned long total_gpu_size; uint maxStaticNode = 0; uint *degree; uint *value; uint *label; bool *isInStatic; uint *overloadNodeList; uint *staticNodePointer; uint *activeNodeList; uint *activeOverloadNodePointers; vector<PartEdgeListInfo> partEdgeListInfoArr; /* * overloadEdgeList overload edge list in every iteration * */ uint *overloadEdgeList; FragmentData *fragmentData; bool isFromTail = true; //GPU uint *staticEdgeListD; uint *overloadEdgeListD; bool *isInStaticD; uint *overloadNodeListD; uint *staticNodePointerD; uint *nodePointerD; uint *degreeD; // async need two labels uint *isActiveD1; uint *isActiveD2; uint *isStaticActive; uint *isOverloadActive; uint *valueD; uint *activeNodeListD; uint *activeNodeLabelingPrefixD; uint *activeOverloadNodePointersD; uint *activeOverloadDegreeD; bool *isFinishedDevice; uint *vertexVisitRecord; uint *vertexVisitRecordD; vertexVisitRecord = new uint[testNumNodes]; cudaMalloc(&vertexVisitRecordD, testNumNodes * sizeof(uint)); cudaMemset(vertexVisitRecordD, 0, testNumNodes * sizeof(uint)); degree = new uint[testNumNodes]; value = new uint[testNumNodes]; label = new uint[testNumNodes]; isInStatic = new bool[testNumNodes]; overloadNodeList = new uint[testNumNodes]; staticNodePointer = new uint[testNumNodes]; activeNodeList = new uint[testNumNodes]; activeOverloadNodePointers = new uint[testNumNodes]; getMaxPartitionSize(max_partition_size, total_gpu_size, testNumNodes, adviseK, sizeof(uint), testNumEdge, 15); //caculate degree uint meanDegree = testNumEdge / testNumNodes; cout << " meanDegree " << meanDegree << endl; uint degree0Sum = 0; for (uint i = 0; i < testNumNodes - 1; i++) { if (nodePointersI[i] > testNumEdge) { cout << i << " " << nodePointersI[i] << endl; break; } degree[i] = nodePointersI[i + 1] - nodePointersI[i]; } degree[testNumNodes - 1] = testNumEdge - nodePointersI[testNumNodes - 1]; memcpy(staticNodePointer, nodePointersI, testNumNodes * sizeof(uint)); //caculate static staticEdgeListD gpuErrorcheck(cudaMalloc(&isFinishedDevice, 1 * sizeof(bool))); gpuErrorcheck(cudaMalloc(&staticEdgeListD, max_partition_size * sizeof(uint))); auto startmove = std::chrono::steady_clock::now(); gpuErrorcheck(cudaMemcpy(staticEdgeListD, edgeList, max_partition_size * sizeof(uint), cudaMemcpyHostToDevice)); auto endMove = std::chrono::steady_clock::now(); long testDuration = std::chrono::duration_cast<std::chrono::milliseconds>( endMove - startmove).count(); gpuErrorcheck(cudaMalloc(&isInStaticD, testNumNodes * sizeof(bool))) gpuErrorcheck(cudaMalloc(&overloadNodeListD, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMalloc(&staticNodePointerD, testNumNodes * sizeof(uint))) gpuErrorcheck(cudaMemcpy(staticNodePointerD, nodePointersI, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice)); gpuErrorcheck(cudaMalloc(&nodePointerD, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMemcpy(nodePointerD, nodePointersI, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice)); for (uint i = 0; i < testNumNodes; i++) { label[i] = 1; value[i] = i; if (nodePointersI[i] < max_partition_size && (nodePointersI[i] + degree[i] - 1) < max_partition_size) { isInStatic[i] = true; if (i > maxStaticNode) maxStaticNode = i; } else { isInStatic[i] = false; } } cudaMemcpy(isInStaticD, isInStatic, testNumNodes * sizeof(bool), cudaMemcpyHostToDevice); cout << 
"max_partition_size: " << max_partition_size << " maxStaticNode: " << maxStaticNode << endl; uint partOverloadSize = total_gpu_size - max_partition_size; uint overloadSize = testNumEdge - nodePointersI[maxStaticNode + 1]; cout << " partOverloadSize " << partOverloadSize << " overloadSize " << overloadSize << endl; overloadEdgeList = (uint *) malloc(overloadSize * sizeof(uint)); gpuErrorcheck(cudaMalloc(&overloadEdgeListD, partOverloadSize * sizeof(uint))); gpuErrorcheck(cudaMalloc(&degreeD, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMalloc(&isActiveD1, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMalloc(&isActiveD2, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMalloc(&isStaticActive, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMalloc(&isOverloadActive, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMalloc(&valueD, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMalloc(&activeNodeLabelingPrefixD, testNumNodes * sizeof(unsigned int))); gpuErrorcheck(cudaMalloc(&activeNodeListD, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMalloc(&activeOverloadNodePointersD, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMalloc(&activeOverloadDegreeD, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMemcpy(degreeD, degree, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice)); gpuErrorcheck(cudaMemcpy(valueD, value, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice)); gpuErrorcheck(cudaMemcpy(isActiveD1, label, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice)); gpuErrorcheck(cudaMemset(isActiveD2, 0, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMemset(isStaticActive, 0, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMemset(isOverloadActive, 0, testNumNodes * sizeof(uint))); //cacaulate the active node And make active node array dim3 grid = dim3(56, 1, 1); dim3 block = dim3(1024, 1, 1); //setLabeling<<<grid, block>>>(testNumNodes, labelD, activeNodeLabelingD); thrust::device_ptr<unsigned int> ptr_labeling(isActiveD1); thrust::device_ptr<unsigned int> ptr_labelingTest(isActiveD2); thrust::device_ptr<unsigned int> ptr_labeling_static(isStaticActive); thrust::device_ptr<unsigned int> ptr_labeling_overload(isOverloadActive); thrust::device_ptr<unsigned int> ptr_labeling_prefixsum(activeNodeLabelingPrefixD); thrust::device_ptr<unsigned int> ptrOverloadDegree(activeOverloadDegreeD); thrust::device_ptr<unsigned int> ptrOverloadPrefixsum(activeOverloadNodePointersD); uint activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes); int iter = 0; uint nodeSum = activeNodesNum; ulong overloadEdgeSum = 0; auto startCpu = std::chrono::steady_clock::now(); auto endReadCpu = std::chrono::steady_clock::now(); long durationReadCpu = 0; auto startSwap = std::chrono::steady_clock::now(); auto endSwap = std::chrono::steady_clock::now(); long durationSwap = 0; auto startGpuProcessing = std::chrono::steady_clock::now(); auto endGpuProcessing = std::chrono::steady_clock::now(); long durationGpuProcessing = 0; auto startOverloadGpuProcessing = std::chrono::steady_clock::now(); auto endOverloadGpuProcessing = std::chrono::steady_clock::now(); long durationOverloadGpuProcessing = 0; auto startPreGpuProcessing = std::chrono::steady_clock::now(); auto endPreGpuProcessing = std::chrono::steady_clock::now(); long durationPreGpuProcessing = 0; auto endPreCaculate = std::chrono::steady_clock::now(); long durationPreCaculate = std::chrono::duration_cast<std::chrono::milliseconds>( endPreCaculate - startPreCaculate).count(); cout << "durationPreCaculate time : " << durationPreCaculate << 
" ms" << endl; cudaStream_t steamStatic, streamDynamic; cudaStreamCreate(&steamStatic); cudaStreamCreate(&streamDynamic); auto startMemoryTraverse = std::chrono::steady_clock::now(); auto endMemoryTraverse = std::chrono::steady_clock::now(); long durationMemoryTraverse = 0; auto startProcessing = std::chrono::steady_clock::now(); //uint cursorStartSwap = staticFragmentNum + 1; uint swapValidNodeSum = 0; uint swapValidEdgeSum = 0; uint swapNotValidNodeSum = 0; uint swapNotValidEdgeSum = 0; uint visitEdgeSum = 0; uint swapInEdgeSum = 0; uint headSum; uint tailSum; while (activeNodesNum > 0) { iter++; //cout << "iter " << iter << " activeNodesNum " << activeNodesNum << endl; startPreGpuProcessing = std::chrono::steady_clock::now(); //cleanStaticAndOverloadLabel<<<grid, block>>>(testNumNodes, isStaticActive, isOverloadActive); setStaticAndOverloadLabelAndRecord<<<grid, block>>>(testNumNodes, isActiveD1, isStaticActive, isOverloadActive, isInStaticD, vertexVisitRecordD); uint staticNodeNum = thrust::reduce(ptr_labeling_static, ptr_labeling_static + testNumNodes); if (staticNodeNum > 0) { //cout << "iter " << iter << " staticNodeNum " << staticNodeNum << endl; thrust::exclusive_scan(ptr_labeling_static, ptr_labeling_static + testNumNodes, ptr_labeling_prefixsum); setStaticActiveNodeArray<<<grid, block>>>(testNumNodes, activeNodeListD, isStaticActive, activeNodeLabelingPrefixD); } uint overloadNodeNum = thrust::reduce(ptr_labeling_overload, ptr_labeling_overload + testNumNodes); uint overloadEdgeNum = 0; if (overloadNodeNum > 0) { //cout << "iter " << iter << " overloadNodeNum " << overloadNodeNum << endl; thrust::exclusive_scan(ptr_labeling_overload, ptr_labeling_overload + testNumNodes, ptr_labeling_prefixsum); setOverloadNodePointerSwap<<<grid, block>>>(testNumNodes, overloadNodeListD, activeOverloadDegreeD, isOverloadActive, activeNodeLabelingPrefixD, degreeD); thrust::exclusive_scan(ptrOverloadDegree, ptrOverloadDegree + overloadNodeNum, activeOverloadNodePointersD); overloadEdgeNum = thrust::reduce(thrust::device, ptrOverloadDegree, ptrOverloadDegree + overloadNodeNum, 0); //cout << "iter " << iter << " overloadEdgeNum " << overloadEdgeNum << endl; overloadEdgeSum += overloadEdgeNum; if (overloadEdgeNum > edgeIterationMax) { edgeIterationMax = overloadEdgeNum; } } endPreGpuProcessing = std::chrono::steady_clock::now(); durationPreGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>( endPreGpuProcessing - startPreGpuProcessing).count(); startGpuProcessing = std::chrono::steady_clock::now(); mixDynamicPartLabel<<<grid, block, 0, steamStatic>>>(staticNodeNum, 0, activeNodeListD, isActiveD1, isActiveD2); thread staticCCKernel = thread(ccKernelThread, staticNodeNum, activeNodeListD, staticNodePointerD, degreeD, staticEdgeListD, valueD, isActiveD1, isActiveD2, isFinishedDevice, grid, block, steamStatic); if (staticCCKernel.joinable()) { staticCCKernel.join(); } if (overloadNodeNum > 0) { startCpu = std::chrono::steady_clock::now(); /*cudaMemcpyAsync(activeNodeList, activeNodeListD, activeNodesNum * sizeof(uint), cudaMemcpyDeviceToHost, streamDynamic);*/ cudaMemcpyAsync(overloadNodeList, overloadNodeListD, overloadNodeNum * sizeof(uint), cudaMemcpyDeviceToHost, streamDynamic); cudaMemcpyAsync(activeOverloadNodePointers, activeOverloadNodePointersD, overloadNodeNum * sizeof(uint), cudaMemcpyDeviceToHost, streamDynamic); int threadNum = 20; if (overloadNodeNum < 50) { threadNum = 1; } thread runThreads[threadNum]; for (int i = 0; i < threadNum; i++) { runThreads[i] = 
thread(fillDynamic, i, threadNum, 0, overloadNodeNum, degree, activeOverloadNodePointers, nodePointersI, overloadNodeList, overloadEdgeList, edgeList); } for (unsigned int t = 0; t < threadNum; t++) { runThreads[t].join(); } caculatePartInfoForEdgeList(activeOverloadNodePointers, overloadNodeList, degree, partEdgeListInfoArr, overloadNodeNum, partOverloadSize, overloadEdgeNum); endReadCpu = std::chrono::steady_clock::now(); durationReadCpu += std::chrono::duration_cast<std::chrono::milliseconds>(endReadCpu - startCpu).count(); if (staticCCKernel.joinable()) { staticCCKernel.join(); } endGpuProcessing = std::chrono::steady_clock::now(); durationGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>( endGpuProcessing - startGpuProcessing).count(); for (int i = 0; i < partEdgeListInfoArr.size(); i++) { startMemoryTraverse = std::chrono::steady_clock::now(); gpuErrorcheck(cudaMemcpy(overloadEdgeListD, overloadEdgeList + activeOverloadNodePointers[partEdgeListInfoArr[i].partStartIndex], partEdgeListInfoArr[i].partEdgeNums * sizeof(uint), cudaMemcpyHostToDevice)) transferSum += partEdgeListInfoArr[i].partEdgeNums; endMemoryTraverse = std::chrono::steady_clock::now(); durationMemoryTraverse += std::chrono::duration_cast<std::chrono::milliseconds>( endMemoryTraverse - startMemoryTraverse).count(); /*cout << "iter " << iter << " part " << i << " durationMemoryTraverse " << durationMemoryTraverse << endl;*/ startOverloadGpuProcessing = std::chrono::steady_clock::now(); mixDynamicPartLabel<<<grid, block, 0, streamDynamic>>>(partEdgeListInfoArr[i].partActiveNodeNums, partEdgeListInfoArr[i].partStartIndex, overloadNodeListD, isActiveD1, isActiveD2); uint itr = 0; bool isFinishedHost = true; do { itr++; isFinishedHost = true; cudaMemcpy(isFinishedDevice, &isFinishedHost, sizeof(bool), cudaMemcpyHostToDevice); cc_kernelDynamicSwap2Label<<<grid, block, 0, streamDynamic>>>(partEdgeListInfoArr[i].partStartIndex, partEdgeListInfoArr[i].partActiveNodeNums, overloadNodeListD, degreeD, valueD, itr % 2 == 1 ? isActiveD1 : isActiveD2, itr % 2 == 1 ? 
isActiveD2 : isActiveD1, overloadEdgeListD, activeOverloadNodePointersD, isFinishedDevice); cudaDeviceSynchronize(); cudaMemcpy(&isFinishedHost, isFinishedDevice, sizeof(bool), cudaMemcpyDeviceToHost); isFinishedHost = true; } while (!isFinishedHost); endOverloadGpuProcessing = std::chrono::steady_clock::now(); durationOverloadGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>( endOverloadGpuProcessing - startOverloadGpuProcessing).count(); /*cout << "iter " << iter << " part " << i << " durationOverloadGpuProcessing " << durationOverloadGpuProcessing << endl;*/ } gpuErrorcheck(cudaPeekAtLastError()) } else { if (staticCCKernel.joinable()) { staticCCKernel.join(); } endGpuProcessing = std::chrono::steady_clock::now(); durationGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>( endGpuProcessing - startGpuProcessing).count(); } mixCommonLabel<<<grid, block, 0, streamDynamic>>>(testNumNodes, isActiveD1, isActiveD2); //cudaDeviceSynchronize(); //cout << "mixDynamicPartLabel" << " =========cudaDeviceSynchronize()==========" << endl; //cudaMemcpy(label, isActiveD, testNumNodes * sizeof(uint), cudaMemcpyDeviceToHost); startPreGpuProcessing = std::chrono::steady_clock::now(); activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes); nodeSum += activeNodesNum; endPreGpuProcessing = std::chrono::steady_clock::now(); durationPreGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>( endPreGpuProcessing - startPreGpuProcessing).count(); } cudaDeviceSynchronize(); cudaMemcpy(value, valueD, testNumNodes * sizeof(uint), cudaMemcpyDeviceToHost); cudaMemcpy(vertexVisitRecord, vertexVisitRecordD, testNumNodes * sizeof(uint), cudaMemcpyDeviceToHost); uint partNum = 50; uint partSize = testNumEdge / partNum; vector<uint> partVistRecordList(partNum + 1); uint partSizeCursor = 0; for (uint i = 0; i < testNumNodes; i++) { uint edgeStartIndex = nodePointersI[i]; uint edgeEndIndex = nodePointersI[i] + degree[i]; uint maxPartIndex = partSizeCursor * partSize + partSize; if (edgeStartIndex < maxPartIndex && edgeEndIndex < maxPartIndex) { partVistRecordList[partSizeCursor] += vertexVisitRecord[i] * degree[i]; } else if (edgeStartIndex < maxPartIndex && edgeEndIndex >= maxPartIndex) { partVistRecordList[partSizeCursor] += vertexVisitRecord[i] * (maxPartIndex - edgeStartIndex); partSizeCursor += 1; partVistRecordList[partSizeCursor] += vertexVisitRecord[i] * (edgeEndIndex - maxPartIndex); } else { partSizeCursor += 1; partVistRecordList[partSizeCursor] += vertexVisitRecord[i] * degree[i]; } } for (uint i = 0; i < partNum + 1; i++) { cout << "part " << i << " is " << partVistRecordList[i] << endl; } for (uint i = 0; i < partNum + 1; i++) { cout << partVistRecordList[i] << "\t"; } transferSum += max_partition_size; cout << "transferSum: " << transferSum * 4 << "byte" << endl; cout << "iterationSum " << iter << endl; double edgeIterationAvg = (double) overloadEdgeSum / (double) testNumEdge / iter; double edgeIterationMaxAvg = (double) edgeIterationMax / (double) testNumEdge; cout << "edgeIterationAvg " << edgeIterationAvg << " edgeIterationMaxAvg " << edgeIterationMaxAvg << endl; cout << "nodeSum: " << nodeSum << endl; auto endRead = std::chrono::steady_clock::now(); durationRead = std::chrono::duration_cast<std::chrono::milliseconds>(endRead - startProcessing).count(); cout << "finish time : " << durationRead << " ms" << endl; cout << "total time : " << durationRead + testDuration << " ms" << endl; cout << "cpu time : " << durationReadCpu << " ms" 
<< endl; cout << "pre fact processing time : " << durationGpuProcessing << " ms" << endl; cout << "overload fact processing time : " << durationOverloadGpuProcessing << " ms" << endl; cout << "durationMemoryTraverse : " << durationMemoryTraverse << " ms" << endl; cout << "durationOverloadGpuProcessing : " << durationOverloadGpuProcessing << " ms" << endl; cout << "gpu pre processing time : " << durationPreGpuProcessing << " ms" << endl; cout << "swap processing time : " << durationSwap << " ms" << endl; cout << "overloadEdgeSum : " << overloadEdgeSum << " " << endl; cout << "swapValidNodeSum " << swapValidNodeSum << " swapValidEdgeSum " << swapValidEdgeSum << endl; cout << "swapNotValidNodeSum " << swapNotValidNodeSum << " swapNotValidEdgeSum " << swapNotValidEdgeSum << " visitSum " << visitEdgeSum << " swapInEdgeSum " << swapInEdgeSum << endl; cout << "headSum " << headSum << " tailSum " << tailSum << endl; /*cudaFree(nodePointerD); cudaFree(staticEdgeListD); cudaFree(degreeD); cudaFree(isActiveD1); cudaFree(isActiveD2); cudaFree(valueD); cudaFree(activeNodeListD); cudaFree(activeNodeLabelingPrefixD); cudaFree(activeOverloadNodePointersD); cudaFree(activeOverloadDegreeD); cudaFree(isInStaticD); cudaFree(staticNodePointerD); cudaFree(overloadNodeListD); delete[] label; delete[] degree; delete[] value; delete[] activeNodeList; delete[] activeOverloadNodePointers; delete[] isInStatic; delete[] overloadNodeList; delete[] staticNodePointer; delete[] fragmentData; return durationRead;*/ } void ccShareTrace(string ccPath) { uint testNumNodes = 0; ulong testNumEdge = 0; uint *nodePointersI; uint *edgeList; auto startReadGraph = std::chrono::steady_clock::now(); ifstream infile(ccPath, ios::in | ios::binary); infile.read((char *) &testNumNodes, sizeof(uint)); uint numEdge = 0; infile.read((char *) &numEdge, sizeof(uint)); testNumEdge = numEdge; cout << "vertex num: " << testNumNodes << " edge num: " << testNumEdge << endl; nodePointersI = new uint[testNumNodes]; infile.read((char *) nodePointersI, sizeof(uint) * testNumNodes); gpuErrorcheck(cudaMallocManaged(&edgeList, (numEdge) * sizeof(uint))); cudaMemAdvise(nodePointersI, (testNumNodes + 1) * sizeof(uint), cudaMemAdviseSetReadMostly, 0); cudaMemAdvise(edgeList, (numEdge) * sizeof(uint), cudaMemAdviseSetReadMostly, 0); infile.read((char *) edgeList, sizeof(uint) * testNumEdge); infile.close(); //preprocessData(nodePointersI, edgeList, testNumNodes, testNumEdge); auto endReadGraph = std::chrono::steady_clock::now(); long durationReadGraph = std::chrono::duration_cast<std::chrono::milliseconds>( endReadGraph - startReadGraph).count(); cout << "read graph time : " << durationReadGraph << "ms" << endl; int testTimes = 1; long timeSum = 0; for (int i = 0; i < testTimes; i++) { timeSum += ccCaculateInShareTrace(testNumNodes, testNumEdge, nodePointersI, edgeList); //timeSum += bfsCaculateInShare(testNumNodes, testNumEdge, nodePointersI, edgeList, 53037907); break; } cout << "need cpu " << needCpu << " not need cpu " << notNeedCpu << endl; cout << "processingTime " << processingTimeSum / testTimes << " cpu time " << cpuTimeSum / testTimes << " all Time " << allTimeSum / testTimes << endl; cout << "mean time is " << timeSum / testTimes << endl; cout << "mean validSwapSum is " << validSwapSum / testTimes << endl; cout << trestSum << endl; } long ccCaculateInShareTrace(uint testNumNodes, uint testNumEdge, uint *nodePointersI, uint *edgeList) { auto start = std::chrono::steady_clock::now(); uint *degree = new uint[testNumNodes]; uint *value = new 
uint[testNumNodes]; uint sourceCode = 0; auto startPreCaculate = std::chrono::steady_clock::now(); for (uint i = 0; i < testNumNodes - 1; i++) { degree[i] = nodePointersI[i + 1] - nodePointersI[i]; } degree[testNumNodes - 1] = testNumEdge - nodePointersI[testNumNodes - 1]; bool *label = new bool[testNumNodes]; for (uint i = 0; i < testNumNodes; i++) { label[i] = true; value[i] = i; } label[sourceCode] = true; value[sourceCode] = 1; uint *activeNodeListD; uint *degreeD; uint *valueD; bool *labelD; uint *nodePointersD; cudaMalloc(&activeNodeListD, testNumNodes * sizeof(uint)); cudaMalloc(&nodePointersD, testNumNodes * sizeof(uint)); cudaMalloc(&degreeD, testNumNodes * sizeof(uint)); cudaMalloc(&valueD, testNumNodes * sizeof(uint)); cudaMalloc(&labelD, testNumNodes * sizeof(bool)); cudaMemcpy(degreeD, degree, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice); cudaMemcpy(valueD, value, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice); cudaMemcpy(labelD, label, testNumNodes * sizeof(bool), cudaMemcpyHostToDevice); cudaMemcpy(nodePointersD, nodePointersI, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice); //cacaulate the active node And make active node array uint *activeNodeLabelingD; gpuErrorcheck(cudaMalloc(&activeNodeLabelingD, testNumNodes * sizeof(unsigned int))); uint *activeNodeLabelingPrefixD; gpuErrorcheck(cudaMalloc(&activeNodeLabelingPrefixD, testNumNodes * sizeof(unsigned int))); dim3 grid = dim3(56, 1, 1); dim3 block = dim3(1024, 1, 1); auto endPreCaculate = std::chrono::steady_clock::now(); long durationPreCaculate = std::chrono::duration_cast<std::chrono::milliseconds>( endPreCaculate - startPreCaculate).count(); cout << "durationPreCaculate time : " << durationPreCaculate << " ms" << endl; setLabeling<<<grid, block>>>(testNumNodes, labelD, activeNodeLabelingD); thrust::device_ptr<unsigned int> ptr_labeling(activeNodeLabelingD); thrust::device_ptr<unsigned int> ptr_labeling_prefixsum(activeNodeLabelingPrefixD); uint activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes); int iter = 0; uint nodeSum = activeNodesNum; cout << "durationPreCaculate time : " << durationPreCaculate << " ms" << endl; auto startProcessing = std::chrono::steady_clock::now(); while (activeNodesNum > 0) { iter++; thrust::exclusive_scan(ptr_labeling, ptr_labeling + testNumNodes, ptr_labeling_prefixsum); setActiveNodeArray<<<grid, block>>>(testNumNodes, activeNodeListD, labelD, activeNodeLabelingPrefixD); setLabelDefault<<<grid, block>>>(activeNodesNum, activeNodeListD, labelD); cc_kernel<<<grid, block>>>(activeNodesNum, activeNodeListD, nodePointersD, degreeD, edgeList, valueD, labelD); cudaDeviceSynchronize(); gpuErrorcheck(cudaPeekAtLastError()); for (uint j = 0; j < testNumEdge; j++) { uint temp = edgeList[j]; if (temp >= 0) { uint a = temp + 1; } } setLabeling<<<grid, block>>>(testNumNodes, labelD, activeNodeLabelingD); activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes); nodeSum += activeNodesNum; cout << "iter: " << iter << " activeNodes: " << activeNodesNum << endl; } cudaDeviceSynchronize(); cout << "nodeSum: " << nodeSum << endl; auto endRead = std::chrono::steady_clock::now(); long durationRead = std::chrono::duration_cast<std::chrono::milliseconds>(endRead - startProcessing).count(); cout << "iter sum is " << iter << " finish time : " << durationRead << " ms" << endl; return durationRead; } long ccCaculateCommonMemoryInnerAsyncRandom(uint testNumNodes, uint testNumEdge, uint *nodePointersI, uint *edgeList, float adviseK) { cout << 
"=========ccCaculateCommonMemoryInnerAsync1========" << endl; ulong edgeIterationMax = 0; auto start = std::chrono::steady_clock::now(); auto startPreCaculate = std::chrono::steady_clock::now(); //CPU long durationRead; ulong transferSum = 0; unsigned long max_partition_size; unsigned long total_gpu_size; uint maxStaticNode = 0; uint *degree; uint *value; uint *label; bool *isInStatic; uint *overloadNodeList; uint *staticNodePointer; uint *activeNodeList; uint *activeOverloadNodePointers; vector<PartEdgeListInfo> partEdgeListInfoArr; /* * overloadEdgeList overload edge list in every iteration * */ uint *overloadEdgeList; FragmentData *fragmentData; bool isFromTail = true; //GPU uint *staticEdgeListD; uint *overloadEdgeListD; bool *isInStaticD; uint *overloadNodeListD; uint *staticNodePointerD; uint *nodePointerD; uint *degreeD; // async need two labels uint *isActiveD1; uint *isActiveD2; uint *isStaticActive; uint *isOverloadActive; uint *valueD; uint *activeNodeListD; uint *activeNodeLabelingPrefixD; uint *activeOverloadNodePointersD; uint *activeOverloadDegreeD; bool *isFinishedDevice; degree = new uint[testNumNodes]; value = new uint[testNumNodes]; label = new uint[testNumNodes]; isInStatic = new bool[testNumNodes]; overloadNodeList = new uint[testNumNodes]; staticNodePointer = new uint[testNumNodes]; activeNodeList = new uint[testNumNodes]; activeOverloadNodePointers = new uint[testNumNodes]; getMaxPartitionSize(max_partition_size, total_gpu_size, testNumNodes, adviseK, sizeof(uint), testNumEdge, 15); gpuErrorcheck(cudaMalloc(&isFinishedDevice, 1 * sizeof(bool))); //caculate degree calculateDegree(testNumNodes, nodePointersI, testNumEdge, degree); //memcpy(staticNodePointer, nodePointersI, testNumNodes * sizeof(uint)); uint edgesInStatic = 0; float startRate = (1 - (float) max_partition_size / (float) testNumEdge) / 2; uint startIndex = (float) testNumNodes * startRate; uint tempStaticSum = 0; /*for (uint i = testNumNodes - 1; i >= 0; i--) { tempStaticSum += degree[i]; if (tempStaticSum > max_partition_size) { startIndex = i; break; } }*/ //startIndex = 0; if (nodePointersI[startIndex] + max_partition_size > testNumEdge) { startIndex = (float) testNumNodes * 0.1f; } for (uint i = 0; i < testNumNodes; i++) { label[i] = 1; value[i] = i; if (i >= startIndex && nodePointersI[i] < nodePointersI[startIndex] + max_partition_size - degree[i]) { isInStatic[i] = true; staticNodePointer[i] = nodePointersI[i] - nodePointersI[startIndex]; if (i > maxStaticNode) { maxStaticNode = i; } edgesInStatic += degree[i]; } else { isInStatic[i] = false; } } gpuErrorcheck(cudaMalloc(&staticEdgeListD, max_partition_size * sizeof(uint))); auto startmove = std::chrono::steady_clock::now(); gpuErrorcheck( cudaMemcpy(staticEdgeListD, edgeList + nodePointersI[startIndex], max_partition_size * sizeof(uint), cudaMemcpyHostToDevice)); auto endMove = std::chrono::steady_clock::now(); long testDuration = std::chrono::duration_cast<std::chrono::milliseconds>( endMove - startmove).count(); cout << "move duration " << testDuration << endl; gpuErrorcheck(cudaMalloc(&isInStaticD, testNumNodes * sizeof(bool))) gpuErrorcheck(cudaMalloc(&overloadNodeListD, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMalloc(&staticNodePointerD, testNumNodes * sizeof(uint))) gpuErrorcheck( cudaMemcpy(staticNodePointerD, staticNodePointer, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice)); cudaMemcpy(isInStaticD, isInStatic, testNumNodes * sizeof(bool), cudaMemcpyHostToDevice); uint partOverloadSize = total_gpu_size - max_partition_size; 
uint overloadSize = testNumEdge - edgesInStatic; cout << " partOverloadSize " << partOverloadSize << " overloadSize " << overloadSize << endl; overloadEdgeList = (uint *) malloc(overloadSize * sizeof(uint)); gpuErrorcheck(cudaMalloc(&overloadEdgeListD, partOverloadSize * sizeof(uint))); gpuErrorcheck(cudaMalloc(&degreeD, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMalloc(&isActiveD1, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMalloc(&isActiveD2, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMalloc(&isStaticActive, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMalloc(&isOverloadActive, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMalloc(&valueD, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMalloc(&activeNodeLabelingPrefixD, testNumNodes * sizeof(unsigned int))); gpuErrorcheck(cudaMalloc(&activeNodeListD, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMalloc(&activeOverloadNodePointersD, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMalloc(&activeOverloadDegreeD, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMemcpy(degreeD, degree, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice)); gpuErrorcheck(cudaMemcpy(valueD, value, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice)); gpuErrorcheck(cudaMemcpy(isActiveD1, label, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice)); gpuErrorcheck(cudaMemset(isActiveD2, 0, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMemset(isStaticActive, 0, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMemset(isOverloadActive, 0, testNumNodes * sizeof(uint))); //cacaulate the active node And make active node array dim3 grid = dim3(56, 1, 1); dim3 block = dim3(1024, 1, 1); //setLabeling<<<grid, block>>>(testNumNodes, labelD, activeNodeLabelingD); thrust::device_ptr<unsigned int> ptr_labeling(isActiveD1); thrust::device_ptr<unsigned int> ptr_labelingTest(isActiveD2); thrust::device_ptr<unsigned int> ptr_labeling_static(isStaticActive); thrust::device_ptr<unsigned int> ptr_labeling_overload(isOverloadActive); thrust::device_ptr<unsigned int> ptr_labeling_prefixsum(activeNodeLabelingPrefixD); thrust::device_ptr<unsigned int> ptrOverloadDegree(activeOverloadDegreeD); thrust::device_ptr<unsigned int> ptrOverloadPrefixsum(activeOverloadNodePointersD); uint activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes); int iter = 0; uint nodeSum = activeNodesNum; ulong overloadEdgeSum = 0; auto startCpu = std::chrono::steady_clock::now(); auto endReadCpu = std::chrono::steady_clock::now(); long durationReadCpu = 0; auto startSwap = std::chrono::steady_clock::now(); auto endSwap = std::chrono::steady_clock::now(); long durationSwap = 0; auto startGpuProcessing = std::chrono::steady_clock::now(); auto endGpuProcessing = std::chrono::steady_clock::now(); long durationGpuProcessing = 0; auto startOverloadGpuProcessing = std::chrono::steady_clock::now(); auto endOverloadGpuProcessing = std::chrono::steady_clock::now(); long durationOverloadGpuProcessing = 0; auto startPreGpuProcessing = std::chrono::steady_clock::now(); auto endPreGpuProcessing = std::chrono::steady_clock::now(); long durationPreGpuProcessing = 0; auto endPreCaculate = std::chrono::steady_clock::now(); long durationPreCaculate = std::chrono::duration_cast<std::chrono::milliseconds>( endPreCaculate - startPreCaculate).count(); cout << "durationPreCaculate time : " << durationPreCaculate << " ms" << endl; cudaStream_t steamStatic, streamDynamic; cudaStreamCreate(&steamStatic); cudaStreamCreate(&streamDynamic); auto startMemoryTraverse = 
std::chrono::steady_clock::now(); auto endMemoryTraverse = std::chrono::steady_clock::now(); long durationMemoryTraverse = 0; //uint cursorStartSwap = staticFragmentNum + 1; uint swapValidNodeSum = 0; uint swapValidEdgeSum = 0; uint swapNotValidNodeSum = 0; uint swapNotValidEdgeSum = 0; uint visitEdgeSum = 0; uint swapInEdgeSum = 0; uint headSum; uint tailSum; long TIME = 0; int testTimes = 10; for (int testIndex = 0; testIndex < testTimes; testIndex++) { for (uint i = 0; i < testNumNodes; i++) { label[i] = 1; value[i] = i; } cudaMemcpy(isInStaticD, isInStatic, testNumNodes * sizeof(bool), cudaMemcpyHostToDevice); gpuErrorcheck(cudaMemcpy(valueD, value, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice)); gpuErrorcheck(cudaMemcpy(isActiveD1, label, testNumNodes * sizeof(uint), cudaMemcpyHostToDevice)); gpuErrorcheck(cudaMemset(isActiveD2, 0, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMemset(isStaticActive, 0, testNumNodes * sizeof(uint))); gpuErrorcheck(cudaMemset(isOverloadActive, 0, testNumNodes * sizeof(uint))); activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes); iter = 0; auto startProcessing = std::chrono::steady_clock::now(); auto startTest = std::chrono::steady_clock::now(); auto endTest = std::chrono::steady_clock::now(); long durationTest = 0; while (activeNodesNum > 0) { iter++; //cout << "iter " << iter << " activeNodesNum " << activeNodesNum << endl; startPreGpuProcessing = std::chrono::steady_clock::now(); //cleanStaticAndOverloadLabel<<<grid, block>>>(testNumNodes, isStaticActive, isOverloadActive); setStaticAndOverloadLabel<<<grid, block>>>(testNumNodes, isActiveD1, isStaticActive, isOverloadActive, isInStaticD); uint staticNodeNum = thrust::reduce(ptr_labeling_static, ptr_labeling_static + testNumNodes); if (staticNodeNum > 0) { //cout << "iter " << iter << " staticNodeNum " << staticNodeNum << endl; thrust::exclusive_scan(ptr_labeling_static, ptr_labeling_static + testNumNodes, ptr_labeling_prefixsum); setStaticActiveNodeArray<<<grid, block>>>(testNumNodes, activeNodeListD, isStaticActive, activeNodeLabelingPrefixD); } uint overloadNodeNum = thrust::reduce(ptr_labeling_overload, ptr_labeling_overload + testNumNodes); uint overloadEdgeNum = 0; if (overloadNodeNum > 0) { //cout << "iter " << iter << " overloadNodeNum " << overloadNodeNum << endl; thrust::exclusive_scan(ptr_labeling_overload, ptr_labeling_overload + testNumNodes, ptr_labeling_prefixsum); setOverloadNodePointerSwap<<<grid, block>>>(testNumNodes, overloadNodeListD, activeOverloadDegreeD, isOverloadActive, activeNodeLabelingPrefixD, degreeD); thrust::exclusive_scan(ptrOverloadDegree, ptrOverloadDegree + overloadNodeNum, activeOverloadNodePointersD); overloadEdgeNum = thrust::reduce(thrust::device, ptrOverloadDegree, ptrOverloadDegree + overloadNodeNum, 0); //cout << "iter " << iter << " overloadEdgeNum " << overloadEdgeNum << endl; overloadEdgeSum += overloadEdgeNum; if (overloadEdgeNum > edgeIterationMax) { edgeIterationMax = overloadEdgeNum; } } endPreGpuProcessing = std::chrono::steady_clock::now(); durationPreGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>( endPreGpuProcessing - startPreGpuProcessing).count(); startGpuProcessing = std::chrono::steady_clock::now(); mixDynamicPartLabel<<<grid, block, 0, steamStatic>>>(staticNodeNum, 0, activeNodeListD, isActiveD1, isActiveD2); thread staticCCKernel = thread(ccKernelThread, staticNodeNum, activeNodeListD, staticNodePointerD, degreeD, staticEdgeListD, valueD, isActiveD1, isActiveD2, isFinishedDevice, grid, 
block, steamStatic); /*if (staticCCKernel.joinable()) { staticCCKernel.join(); }*/ if (overloadNodeNum > 0) { startCpu = std::chrono::steady_clock::now(); /*cudaMemcpyAsync(activeNodeList, activeNodeListD, activeNodesNum * sizeof(uint), cudaMemcpyDeviceToHost, streamDynamic);*/ cudaMemcpyAsync(overloadNodeList, overloadNodeListD, overloadNodeNum * sizeof(uint), cudaMemcpyDeviceToHost, streamDynamic); cudaMemcpyAsync(activeOverloadNodePointers, activeOverloadNodePointersD, overloadNodeNum * sizeof(uint), cudaMemcpyDeviceToHost, streamDynamic); int threadNum = 20; if (overloadNodeNum < 50) { threadNum = 1; } thread runThreads[threadNum]; for (int i = 0; i < threadNum; i++) { runThreads[i] = thread(fillDynamic, i, threadNum, 0, overloadNodeNum, degree, activeOverloadNodePointers, nodePointersI, overloadNodeList, overloadEdgeList, edgeList); } for (unsigned int t = 0; t < threadNum; t++) { runThreads[t].join(); } caculatePartInfoForEdgeList(activeOverloadNodePointers, overloadNodeList, degree, partEdgeListInfoArr, overloadNodeNum, partOverloadSize, overloadEdgeNum); endReadCpu = std::chrono::steady_clock::now(); durationReadCpu += std::chrono::duration_cast<std::chrono::milliseconds>(endReadCpu - startCpu).count(); if (staticCCKernel.joinable()) { staticCCKernel.join(); } endGpuProcessing = std::chrono::steady_clock::now(); durationGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>( endGpuProcessing - startGpuProcessing).count(); for (auto &i : partEdgeListInfoArr) { startMemoryTraverse = std::chrono::steady_clock::now(); gpuErrorcheck(cudaMemcpy(overloadEdgeListD, overloadEdgeList + activeOverloadNodePointers[i.partStartIndex], i.partEdgeNums * sizeof(uint), cudaMemcpyHostToDevice)) transferSum += i.partEdgeNums; endMemoryTraverse = std::chrono::steady_clock::now(); durationMemoryTraverse += std::chrono::duration_cast<std::chrono::milliseconds>( endMemoryTraverse - startMemoryTraverse).count(); /*cout << "iter " << iter << " part " << i << " durationMemoryTraverse " << durationMemoryTraverse << endl;*/ startOverloadGpuProcessing = std::chrono::steady_clock::now(); mixDynamicPartLabel<<<grid, block, 0, streamDynamic>>>(i.partActiveNodeNums, i.partStartIndex, overloadNodeListD, isActiveD1, isActiveD2); uint itr = 0; bool isFinishedHost = true; do { itr++; isFinishedHost = true; cudaMemcpy(isFinishedDevice, &isFinishedHost, sizeof(bool), cudaMemcpyHostToDevice); cc_kernelDynamicSwap2Label<<<grid, block, 0, streamDynamic>>>(i.partStartIndex, i.partActiveNodeNums, overloadNodeListD, degreeD, valueD, itr % 2 == 1 ? isActiveD1 : isActiveD2, itr % 2 == 1 ? 
isActiveD2 : isActiveD1, overloadEdgeListD, activeOverloadNodePointersD, isFinishedDevice); cudaDeviceSynchronize(); cudaMemcpy(&isFinishedHost, isFinishedDevice, sizeof(bool), cudaMemcpyDeviceToHost); isFinishedHost = true; } while (!isFinishedHost); endOverloadGpuProcessing = std::chrono::steady_clock::now(); durationOverloadGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>( endOverloadGpuProcessing - startOverloadGpuProcessing).count(); /*cout << "iter " << iter << " part " << i << " durationOverloadGpuProcessing " << durationOverloadGpuProcessing << endl;*/ } gpuErrorcheck(cudaPeekAtLastError()) } else { if (staticCCKernel.joinable()) { staticCCKernel.join(); } endGpuProcessing = std::chrono::steady_clock::now(); durationGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>( endGpuProcessing - startGpuProcessing).count(); } mixCommonLabel<<<grid, block, 0, streamDynamic>>>(testNumNodes, isActiveD1, isActiveD2); //cudaDeviceSynchronize(); //cout << "mixDynamicPartLabel" << " =========cudaDeviceSynchronize()==========" << endl; //cudaMemcpy(label, isActiveD, testNumNodes * sizeof(uint), cudaMemcpyDeviceToHost); startPreGpuProcessing = std::chrono::steady_clock::now(); activeNodesNum = thrust::reduce(ptr_labeling, ptr_labeling + testNumNodes); nodeSum += activeNodesNum; endPreGpuProcessing = std::chrono::steady_clock::now(); durationPreGpuProcessing += std::chrono::duration_cast<std::chrono::milliseconds>( endPreGpuProcessing - startPreGpuProcessing).count(); } cudaDeviceSynchronize(); cudaMemcpy(value, valueD, testNumNodes * sizeof(uint), cudaMemcpyDeviceToHost); transferSum += max_partition_size; cout << "transferSum: " << transferSum * 4 << "byte" << endl; cout << "iterationSum " << iter << endl; double edgeIterationAvg = (double) overloadEdgeSum / (double) testNumEdge / iter; double edgeIterationMaxAvg = (double) edgeIterationMax / (double) testNumEdge; cout << "edgeIterationAvg " << edgeIterationAvg << " edgeIterationMaxAvg " << edgeIterationMaxAvg << endl; cout << "nodeSum: " << nodeSum << endl; auto endRead = std::chrono::steady_clock::now(); durationRead = std::chrono::duration_cast<std::chrono::milliseconds>(endRead - startProcessing).count(); cout << "finish time : " << durationRead << " ms" << endl; cout << "total time : " << durationRead + testDuration << " ms" << endl; cout << "cpu time : " << durationReadCpu << " ms" << endl; cout << "pre fact processing time : " << durationGpuProcessing << " ms" << endl; cout << "overload fact processing time : " << durationOverloadGpuProcessing << " ms" << endl; cout << "durationMemoryTraverse : " << durationMemoryTraverse << " ms" << endl; cout << "durationOverloadGpuProcessing : " << durationOverloadGpuProcessing << " ms" << endl; cout << "gpu pre processing time : " << durationPreGpuProcessing << " ms" << endl; cout << "swap processing time : " << durationSwap << " ms" << endl; cout << "overloadEdgeSum : " << overloadEdgeSum << " " << endl; cout << "swapValidNodeSum " << swapValidNodeSum << " swapValidEdgeSum " << swapValidEdgeSum << endl; cout << "swapNotValidNodeSum " << swapNotValidNodeSum << " swapNotValidEdgeSum " << swapNotValidEdgeSum << " visitSum " << visitEdgeSum << " swapInEdgeSum " << swapInEdgeSum << endl; cout << "headSum " << headSum << " tailSum " << tailSum << endl; TIME += durationRead; } cout << "TIME " << (float) TIME / (float) testTimes << endl; /*cudaFree(nodePointerD); cudaFree(staticEdgeListD); cudaFree(degreeD); cudaFree(isActiveD1); cudaFree(isActiveD2); cudaFree(valueD); 
cudaFree(activeNodeListD); cudaFree(activeNodeLabelingPrefixD); cudaFree(activeOverloadNodePointersD); cudaFree(activeOverloadDegreeD); cudaFree(isInStaticD); cudaFree(staticNodePointerD); cudaFree(overloadNodeListD); delete[] label; delete[] degree; delete[] value; delete[] activeNodeList; delete[] activeOverloadNodePointers; delete[] isInStatic; delete[] overloadNodeList; delete[] staticNodePointer; delete[] fragmentData; return durationRead;*/ }
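The connected-components variants above (a static edge partition resident on the GPU plus overload edges streamed in per iteration) are all built on the same per-vertex relaxation: every vertex starts with its own id as its label and repeatedly adopts the minimum label among its neighbours until nothing changes. The sketch below is illustrative only — it is not code from the file above, and all names are hypothetical — but it shows that core step over a CSR graph with an atomicMin-based push.

#include <cuda_runtime.h>

// Hypothetical, self-contained sketch of min-label propagation for connected
// components; the file above wraps the same idea in its static/overload
// partitioning and double-buffered activity labels.
__global__ void ccRelaxSketch(unsigned int activeNum,
                              const unsigned int *activeNodeList, // compacted active vertices
                              const unsigned int *nodePointers,   // CSR row offsets
                              const unsigned int *degrees,        // per-vertex degrees
                              const unsigned int *edgeList,       // CSR column indices
                              unsigned int *value,                // current component label
                              bool *nextActive) {                 // activity flags for next pass
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int stride = gridDim.x * blockDim.x;
    for (unsigned int i = idx; i < activeNum; i += stride) {
        unsigned int v = activeNodeList[i];
        unsigned int label = value[v];
        unsigned int start = nodePointers[v];
        for (unsigned int e = 0; e < degrees[v]; ++e) {
            unsigned int nbr = edgeList[start + e];
            // atomicMin returns the previous value; if we lowered it, the
            // neighbour must be revisited in the next iteration.
            if (atomicMin(&value[nbr], label) > label) {
                nextActive[nbr] = true;
            }
        }
    }
}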
2a3d9498938633adfa4afb2b7b8c9e0b2fd232a1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE

#define FILENAME(line) FILENAME_FOR_EXCEPTIONS_CUDA("src/cuda-kernels/awkward_ListOffsetArray_reduce_nonlocal_preparenext_64.cpp", line)

#include "standard_parallel_algorithms.h"
#include "awkward/kernels.h"

__global__ void
awkward_ListOffsetArray_reduce_nonlocal_preparenext_64_initialize_distincts(
  int64_t* distincts,
  int64_t distinctlen) {
  int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
  if (thread_id < distinctlen) {
    distincts[thread_id] = -1;
  }
}

__global__ void
awkward_ListOffsetArray_reduce_nonlocal_preparenext_64_filter_k(
  int64_t* offsetscopy,
  const int64_t* offsets,
  int8_t* filter_k,
  int64_t length) {
  int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
  if (thread_id < length) {
    if (offsetscopy[thread_id] < offsets[thread_id + 1]) {
      filter_k[thread_id] = 1;
    }
  }
}

ERROR awkward_ListOffsetArray_reduce_nonlocal_preparenext_64(
  int64_t* nextcarry,
  int64_t* nextparents,
  int64_t nextlen,
  int64_t* maxnextparents,
  int64_t* distincts,
  int64_t distinctslen,
  int64_t* offsetscopy,
  const int64_t* offsets,
  int64_t length,
  const int64_t* parents,
  int64_t maxcount) {
  *maxnextparents = 0;

  // Reset every distinct slot to -1 on the device.
  dim3 blocks_per_grid = blocks(distinctslen);
  dim3 threads_per_block = threads(distinctslen);
  hipLaunchKernelGGL((awkward_ListOffsetArray_reduce_nonlocal_preparenext_64_initialize_distincts),
                     dim3(blocks_per_grid), dim3(threads_per_block), 0, 0,
                     distincts,
                     distinctslen);

  // Flag the lists that still have unconsumed elements; the mask holds one
  // flag per list, so it only needs `length` entries.
  blocks_per_grid = blocks(length);
  threads_per_block = threads(length);
  int8_t* filter_k;
  HANDLE_ERROR(hipMalloc((void**)&filter_k, sizeof(int8_t) * length));
  HANDLE_ERROR(hipMemset(filter_k, 0, sizeof(int8_t) * length));
  hipLaunchKernelGGL((awkward_ListOffsetArray_reduce_nonlocal_preparenext_64_filter_k),
                     dim3(blocks_per_grid), dim3(threads_per_block), 0, 0,
                     offsetscopy,
                     offsets,
                     filter_k,
                     length);
  HANDLE_ERROR(hipDeviceSynchronize());

  // The gather itself is still sequential on the host; the kernels above only
  // initialize the distincts and compute the per-list filter mask.
  int64_t k = 0;
  while (k < nextlen) {
    int64_t j = 0;
    for (int64_t i = 0; i < length; i++) {
      if (offsetscopy[i] < offsets[i + 1]) {
        int64_t diff = offsetscopy[i] - offsets[i];
        int64_t parent = parents[i];

        nextcarry[k] = offsetscopy[i];
        nextparents[k] = parent * maxcount + diff;

        if (*maxnextparents < nextparents[k]) {
          *maxnextparents = nextparents[k];
        }

        if (distincts[nextparents[k]] == -1) {
          distincts[nextparents[k]] = j;
          j++;
        }
        k++;
        offsetscopy[i]++;
      }
    }
  }
  HANDLE_ERROR(hipFree(filter_k));
  return success();
}
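The .hip file above is the hipify translation of the .cu file that follows; besides the header swap, the mechanical change is the kernel-launch syntax. A minimal illustration of that mapping (toy kernel, illustrative names, not part of either file):

#include <hip/hip_runtime.h>

__global__ void scaleSketch(float *data, int n, float factor) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

void launchScaleSketch(float *d_data, int n, hipStream_t stream) {
    dim3 grid((n + 255) / 256), block(256);
    // CUDA form:  scaleSketch<<<grid, block, 0, stream>>>(d_data, n, 2.0f);
    // HIP form emitted by hipify (sharedMemBytes = 0):
    hipLaunchKernelGGL(scaleSketch, grid, block, 0, stream, d_data, n, 2.0f);
}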
2a3d9498938633adfa4afb2b7b8c9e0b2fd232a1.cu
// BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE

#define FILENAME(line) FILENAME_FOR_EXCEPTIONS_CUDA("src/cuda-kernels/awkward_ListOffsetArray_reduce_nonlocal_preparenext_64.cpp", line)

#include "standard_parallel_algorithms.h"
#include "awkward/kernels.h"

__global__ void
awkward_ListOffsetArray_reduce_nonlocal_preparenext_64_initialize_distincts(
  int64_t* distincts,
  int64_t distinctlen) {
  int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
  if (thread_id < distinctlen) {
    distincts[thread_id] = -1;
  }
}

__global__ void
awkward_ListOffsetArray_reduce_nonlocal_preparenext_64_filter_k(
  int64_t* offsetscopy,
  const int64_t* offsets,
  int8_t* filter_k,
  int64_t length) {
  int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
  if (thread_id < length) {
    if (offsetscopy[thread_id] < offsets[thread_id + 1]) {
      filter_k[thread_id] = 1;
    }
  }
}

ERROR awkward_ListOffsetArray_reduce_nonlocal_preparenext_64(
  int64_t* nextcarry,
  int64_t* nextparents,
  int64_t nextlen,
  int64_t* maxnextparents,
  int64_t* distincts,
  int64_t distinctslen,
  int64_t* offsetscopy,
  const int64_t* offsets,
  int64_t length,
  const int64_t* parents,
  int64_t maxcount) {
  *maxnextparents = 0;

  // Reset every distinct slot to -1 on the device.
  dim3 blocks_per_grid = blocks(distinctslen);
  dim3 threads_per_block = threads(distinctslen);
  awkward_ListOffsetArray_reduce_nonlocal_preparenext_64_initialize_distincts<<<blocks_per_grid, threads_per_block>>>(
    distincts,
    distinctslen);

  // Flag the lists that still have unconsumed elements; the mask holds one
  // flag per list, so it only needs `length` entries.
  blocks_per_grid = blocks(length);
  threads_per_block = threads(length);
  int8_t* filter_k;
  HANDLE_ERROR(cudaMalloc((void**)&filter_k, sizeof(int8_t) * length));
  HANDLE_ERROR(cudaMemset(filter_k, 0, sizeof(int8_t) * length));
  awkward_ListOffsetArray_reduce_nonlocal_preparenext_64_filter_k<<<blocks_per_grid, threads_per_block>>>(
    offsetscopy,
    offsets,
    filter_k,
    length);
  HANDLE_ERROR(cudaDeviceSynchronize());

  // The gather itself is still sequential on the host; the kernels above only
  // initialize the distincts and compute the per-list filter mask.
  int64_t k = 0;
  while (k < nextlen) {
    int64_t j = 0;
    for (int64_t i = 0; i < length; i++) {
      if (offsetscopy[i] < offsets[i + 1]) {
        int64_t diff = offsetscopy[i] - offsets[i];
        int64_t parent = parents[i];

        nextcarry[k] = offsetscopy[i];
        nextparents[k] = parent * maxcount + diff;

        if (*maxnextparents < nextparents[k]) {
          *maxnextparents = nextparents[k];
        }

        if (distincts[nextparents[k]] == -1) {
          distincts[nextparents[k]] = j;
          j++;
        }
        k++;
        offsetscopy[i]++;
      }
    }
  }
  HANDLE_ERROR(cudaFree(filter_k));
  return success();
}
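Both versions size their launches with the blocks()/threads() helpers from standard_parallel_algorithms.h, whose definitions are not shown in this file. The usual shape of such helpers — a capped block size plus a ceiling division — looks like the sketch below (an assumption about the pattern, not the library's actual implementation):

#include <cuda_runtime.h>
#include <cstdint>

// Sketch only: cap the block at 1024 threads and cover n elements with
// ceil(n / blockSize) blocks, guarding the degenerate n <= 0 case.
static inline unsigned int sketch_block_size(int64_t n) {
    if (n <= 0) return 1;
    return n < 1024 ? (unsigned int)n : 1024u;
}

static inline dim3 sketch_threads(int64_t n) {
    return dim3(sketch_block_size(n));
}

static inline dim3 sketch_blocks(int64_t n) {
    if (n <= 0) return dim3(1);
    unsigned int t = sketch_block_size(n);
    return dim3((unsigned int)((n + t - 1) / t));
}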
cd9896dd7881d01d4312efe043a7d9290ce16599.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "saber/funcs/impl/cuda/saber_power.h" #include "hip/hip_fp16.h" namespace anakin { namespace saber { template <typename Dtype> __global__ void ker_power_fwd(Dtype * out_data, \ const int count, const float scale,\ const float shift, const float power,\ const Dtype* in_data) { CUDA_KERNEL_LOOP(tid, count){ out_data[tid] = pow(in_data[tid] * scale + shift, power); } } template <typename Dtype> __global__ void ker_scale_fwd(Dtype * out_data, \ const int count, const float scale,\ const float shift,\ const Dtype* in_data) { CUDA_KERNEL_LOOP(tid, count){ out_data[tid] = in_data[tid] * scale + shift; } } template <typename Dtype> __global__ void ker_power_fwd(Dtype * out_data, \ const int count, const float scale,\ const float shift, const float power,\ const int* out_shape, const int* out_stride, const int* in_stride, const int num_axis, const Dtype* in_data) { CUDA_KERNEL_LOOP(tid, count){ int in_offset = 0; int out_offset = 0; int valid_stride = 1; for (int i = num_axis - 1; i >= 0; --i) { int id = (tid / valid_stride) % out_shape[i]; in_offset += id * in_stride[i]; out_offset += id * out_stride[i]; valid_stride *= out_shape[i]; } out_data[out_offset] = pow(in_data[in_offset] * scale + shift, power); } } template <typename Dtype> __global__ void ker_scale_fwd(Dtype * out_data, \ const int count, const float scale,\ const float shift,\ const int* out_shape, const int* out_stride, const int* in_stride, const int num_axis, const Dtype* in_data) { CUDA_KERNEL_LOOP(tid, count){ int in_offset = 0; int out_offset = 0; int valid_stride = 1; for (int i = num_axis - 1; i >= 0; --i) { int id = (tid / valid_stride) % out_shape[i]; in_offset += id * in_stride[i]; out_offset += id * out_stride[i]; valid_stride *= out_shape[i]; } //printf("%d, %d, %d\n", tid, in_offset, out_offset); out_data[out_offset] = in_data[in_offset] * scale + shift; //printf("out_offset:%d, %f\n", out_offset, out_data[out_offset]); } } template <DataType OpDtype, DataType inDtype, DataType outDtype, typename LayOutType_op, typename LayOutType_in, typename LayOutType_out> SaberStatus SaberPower<NV, OpDtype, inDtype, outDtype,\ LayOutType_op, LayOutType_in, LayOutType_out>::dispatch(\ const std::vector<DataTensor_in *>& inputs, \ std::vector<DataTensor_out *>& outputs, \ PowerParam<OpTensor>& param) { const InDataType* in_data = inputs[0]->data(); OutDataType* out_data = outputs[0]->mutable_data(); hipStream_t cuda_stream = this->_ctx.get_compute_stream(); int count = outputs[0]->valid_size(); const float scale = param.scale; const float shift = param.shift; const float power = param.power; if (inputs[0]->is_continue_mem() && outputs[0]->is_continue_mem()) { if (power == 1) { hipLaunchKernelGGL(( ker_scale_fwd<OpDataType>)\ , dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, \ out_data, count, scale, shift, in_data); } else { hipLaunchKernelGGL(( ker_power_fwd<OpDataType>)\ , dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, \ out_data, count, scale, shift, power, in_data); } } else { const int* i_stride = _in_steps.data(); const int* o_stride = _out_steps.data(); const int* valid_shape = _out_valid_shape.data(); if (power == 1) { hipLaunchKernelGGL(( ker_scale_fwd<OpDataType>)\ , dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, \ out_data, count, scale, shift, valid_shape, o_stride, i_stride, outputs[0]->dims(), in_data); } else { hipLaunchKernelGGL(( ker_power_fwd<OpDataType>)\ 
, dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, \ out_data, count, scale, shift, power, valid_shape, o_stride, i_stride, outputs[0]->dims(), in_data); } } return SaberSuccess; } } }
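The second pair of kernels in the file above handles non-contiguous tensors by decomposing the flat thread index into per-axis coordinates against out_shape and re-encoding those coordinates with the input and output strides. The same mapping written as a plain host reference (illustrative helper, not part of the library):

#include <cmath>
#include <vector>

template <typename T>
void power_strided_reference(T *out, const T *in, int count,
                             float scale, float shift, float power,
                             const std::vector<int> &out_shape,
                             const std::vector<int> &out_stride,
                             const std::vector<int> &in_stride) {
    int num_axis = (int)out_shape.size();
    for (int tid = 0; tid < count; ++tid) {
        int in_offset = 0, out_offset = 0, valid_stride = 1;
        // Walk the axes from innermost to outermost, exactly as the kernel does.
        for (int i = num_axis - 1; i >= 0; --i) {
            int id = (tid / valid_stride) % out_shape[i];
            in_offset += id * in_stride[i];
            out_offset += id * out_stride[i];
            valid_stride *= out_shape[i];
        }
        out[out_offset] = static_cast<T>(std::pow(in[in_offset] * scale + shift, power));
    }
}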
cd9896dd7881d01d4312efe043a7d9290ce16599.cu
#include "saber/funcs/impl/cuda/saber_power.h" #include "cuda_fp16.h" namespace anakin { namespace saber { template <typename Dtype> __global__ void ker_power_fwd(Dtype * out_data, \ const int count, const float scale,\ const float shift, const float power,\ const Dtype* in_data) { CUDA_KERNEL_LOOP(tid, count){ out_data[tid] = pow(in_data[tid] * scale + shift, power); } } template <typename Dtype> __global__ void ker_scale_fwd(Dtype * out_data, \ const int count, const float scale,\ const float shift,\ const Dtype* in_data) { CUDA_KERNEL_LOOP(tid, count){ out_data[tid] = in_data[tid] * scale + shift; } } template <typename Dtype> __global__ void ker_power_fwd(Dtype * out_data, \ const int count, const float scale,\ const float shift, const float power,\ const int* out_shape, const int* out_stride, const int* in_stride, const int num_axis, const Dtype* in_data) { CUDA_KERNEL_LOOP(tid, count){ int in_offset = 0; int out_offset = 0; int valid_stride = 1; for (int i = num_axis - 1; i >= 0; --i) { int id = (tid / valid_stride) % out_shape[i]; in_offset += id * in_stride[i]; out_offset += id * out_stride[i]; valid_stride *= out_shape[i]; } out_data[out_offset] = pow(in_data[in_offset] * scale + shift, power); } } template <typename Dtype> __global__ void ker_scale_fwd(Dtype * out_data, \ const int count, const float scale,\ const float shift,\ const int* out_shape, const int* out_stride, const int* in_stride, const int num_axis, const Dtype* in_data) { CUDA_KERNEL_LOOP(tid, count){ int in_offset = 0; int out_offset = 0; int valid_stride = 1; for (int i = num_axis - 1; i >= 0; --i) { int id = (tid / valid_stride) % out_shape[i]; in_offset += id * in_stride[i]; out_offset += id * out_stride[i]; valid_stride *= out_shape[i]; } //printf("%d, %d, %d\n", tid, in_offset, out_offset); out_data[out_offset] = in_data[in_offset] * scale + shift; //printf("out_offset:%d, %f\n", out_offset, out_data[out_offset]); } } template <DataType OpDtype, DataType inDtype, DataType outDtype, typename LayOutType_op, typename LayOutType_in, typename LayOutType_out> SaberStatus SaberPower<NV, OpDtype, inDtype, outDtype,\ LayOutType_op, LayOutType_in, LayOutType_out>::dispatch(\ const std::vector<DataTensor_in *>& inputs, \ std::vector<DataTensor_out *>& outputs, \ PowerParam<OpTensor>& param) { const InDataType* in_data = inputs[0]->data(); OutDataType* out_data = outputs[0]->mutable_data(); cudaStream_t cuda_stream = this->_ctx.get_compute_stream(); int count = outputs[0]->valid_size(); const float scale = param.scale; const float shift = param.shift; const float power = param.power; if (inputs[0]->is_continue_mem() && outputs[0]->is_continue_mem()) { if (power == 1) { ker_scale_fwd<OpDataType>\ <<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(\ out_data, count, scale, shift, in_data); } else { ker_power_fwd<OpDataType>\ <<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(\ out_data, count, scale, shift, power, in_data); } } else { const int* i_stride = _in_steps.data(); const int* o_stride = _out_steps.data(); const int* valid_shape = _out_valid_shape.data(); if (power == 1) { ker_scale_fwd<OpDataType>\ <<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(\ out_data, count, scale, shift, valid_shape, o_stride, i_stride, outputs[0]->dims(), in_data); } else { ker_power_fwd<OpDataType>\ <<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(\ out_data, count, scale, shift, power, valid_shape, o_stride, i_stride, outputs[0]->dims(), in_data); } } return SaberSuccess; } } }
fa62626c6e05c784bd2c6d8bfd2da4b63bdc5d74.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from magmablas/zswapblk.cu, normal z -> c, Sun Nov 20 20:20:29 2016 */ #include "magma_internal.h" #define BLOCK_SIZE 64 typedef struct { magmaFloatComplex *A; magmaFloatComplex *B; int n, ldda, lddb, npivots; short ipiv[BLOCK_SIZE]; } magmagpu_cswapblk_params_t; /******************************************************************************/ __global__ void magmagpu_cswapblkrm( magmagpu_cswapblk_params_t params ) { unsigned int y = threadIdx.x + blockDim.x*blockIdx.x; if ( y < params.n ) { magmaFloatComplex *A = params.A + y - params.ldda; magmaFloatComplex *B = params.B + y; for( int i = 0; i < params.npivots; i++ ) { A += params.ldda; if ( params.ipiv[i] == -1 ) continue; magmaFloatComplex tmp1 = *A; magmaFloatComplex *tmp2 = B + params.ipiv[i]*params.lddb; *A = *tmp2; *tmp2 = tmp1; } } } /******************************************************************************/ __global__ void magmagpu_cswapblkcm( magmagpu_cswapblk_params_t params ) { unsigned int y = threadIdx.x + blockDim.x*blockIdx.x; unsigned int offset1 = y*params.ldda; unsigned int offset2 = y*params.lddb; if ( y < params.n ) { magmaFloatComplex *A = params.A + offset1 - 1; magmaFloatComplex *B = params.B + offset2; for( int i = 0; i < params.npivots; i++ ) { A++; if ( params.ipiv[i] == -1 ) continue; magmaFloatComplex tmp1 = *A; magmaFloatComplex *tmp2 = B + params.ipiv[i]; *A = *tmp2; *tmp2 = tmp1; } } __syncthreads(); } /***************************************************************************//** Blocked version: swap several pairs of lines. Used in magma_ctstrf() and magma_cssssm(). @ingroup magma_swapblk *******************************************************************************/ extern "C" void magmablas_cswapblk( magma_order_t order, magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t ldda, magmaFloatComplex_ptr dB, magma_int_t lddb, magma_int_t i1, magma_int_t i2, const magma_int_t *ipiv, magma_int_t inci, magma_int_t offset, magma_queue_t queue ) { magma_int_t blocksize = 64; dim3 blocks( magma_ceildiv( n, blocksize ) ); magma_int_t k, im; /* Quick return */ if ( n == 0 ) return; if ( order == MagmaColMajor ) { for( k=(i1-1); k < i2; k += BLOCK_SIZE ) { magma_int_t sb = min(BLOCK_SIZE, i2-k); magmagpu_cswapblk_params_t params = { dA+k, dB, int(n), int(ldda), int(lddb), int(sb) }; for( magma_int_t j = 0; j < sb; j++ ) { im = ipiv[(k+j)*inci] - 1; if ( (k+j) == im ) params.ipiv[j] = -1; else params.ipiv[j] = im - offset; } hipLaunchKernelGGL(( magmagpu_cswapblkcm), dim3(blocks), dim3(blocksize), 0, queue->cuda_stream() , params ); } } else { for( k=(i1-1); k < i2; k += BLOCK_SIZE ) { magma_int_t sb = min(BLOCK_SIZE, i2-k); magmagpu_cswapblk_params_t params = { dA+k*ldda, dB, int(n), int(ldda), int(lddb), int(sb) }; for( magma_int_t j = 0; j < sb; j++ ) { im = ipiv[(k+j)*inci] - 1; if ( (k+j) == im ) params.ipiv[j] = -1; else params.ipiv[j] = im - offset; } hipLaunchKernelGGL(( magmagpu_cswapblkrm), dim3(blocks), dim3(blocksize), 0, queue->cuda_stream() , params ); } } }
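One detail worth noting in both versions of this file: the pivot block (the short ipiv[BLOCK_SIZE] array inside magmagpu_cswapblk_params_t) is passed to the kernel by value, so the pivots ride along in the kernel-argument buffer and need no separate device allocation or copy. A stripped-down sketch of the same pattern (hypothetical names):

#include <hip/hip_runtime.h>

struct SwapParamsSketch {
    float *data;       // device pointer
    int n;
    short pivots[64];  // small fixed-size payload, copied with the kernel arguments
};

__global__ void useParamsByValueSketch(SwapParamsSketch p) {
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < (unsigned int)p.n && p.pivots[i % 64] >= 0) {
        p.data[i] += (float)p.pivots[i % 64];
    }
}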
fa62626c6e05c784bd2c6d8bfd2da4b63bdc5d74.cu
/* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from magmablas/zswapblk.cu, normal z -> c, Sun Nov 20 20:20:29 2016 */ #include "magma_internal.h" #define BLOCK_SIZE 64 typedef struct { magmaFloatComplex *A; magmaFloatComplex *B; int n, ldda, lddb, npivots; short ipiv[BLOCK_SIZE]; } magmagpu_cswapblk_params_t; /******************************************************************************/ __global__ void magmagpu_cswapblkrm( magmagpu_cswapblk_params_t params ) { unsigned int y = threadIdx.x + blockDim.x*blockIdx.x; if ( y < params.n ) { magmaFloatComplex *A = params.A + y - params.ldda; magmaFloatComplex *B = params.B + y; for( int i = 0; i < params.npivots; i++ ) { A += params.ldda; if ( params.ipiv[i] == -1 ) continue; magmaFloatComplex tmp1 = *A; magmaFloatComplex *tmp2 = B + params.ipiv[i]*params.lddb; *A = *tmp2; *tmp2 = tmp1; } } } /******************************************************************************/ __global__ void magmagpu_cswapblkcm( magmagpu_cswapblk_params_t params ) { unsigned int y = threadIdx.x + blockDim.x*blockIdx.x; unsigned int offset1 = y*params.ldda; unsigned int offset2 = y*params.lddb; if ( y < params.n ) { magmaFloatComplex *A = params.A + offset1 - 1; magmaFloatComplex *B = params.B + offset2; for( int i = 0; i < params.npivots; i++ ) { A++; if ( params.ipiv[i] == -1 ) continue; magmaFloatComplex tmp1 = *A; magmaFloatComplex *tmp2 = B + params.ipiv[i]; *A = *tmp2; *tmp2 = tmp1; } } __syncthreads(); } /***************************************************************************//** Blocked version: swap several pairs of lines. Used in magma_ctstrf() and magma_cssssm(). @ingroup magma_swapblk *******************************************************************************/ extern "C" void magmablas_cswapblk( magma_order_t order, magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t ldda, magmaFloatComplex_ptr dB, magma_int_t lddb, magma_int_t i1, magma_int_t i2, const magma_int_t *ipiv, magma_int_t inci, magma_int_t offset, magma_queue_t queue ) { magma_int_t blocksize = 64; dim3 blocks( magma_ceildiv( n, blocksize ) ); magma_int_t k, im; /* Quick return */ if ( n == 0 ) return; if ( order == MagmaColMajor ) { for( k=(i1-1); k < i2; k += BLOCK_SIZE ) { magma_int_t sb = min(BLOCK_SIZE, i2-k); magmagpu_cswapblk_params_t params = { dA+k, dB, int(n), int(ldda), int(lddb), int(sb) }; for( magma_int_t j = 0; j < sb; j++ ) { im = ipiv[(k+j)*inci] - 1; if ( (k+j) == im ) params.ipiv[j] = -1; else params.ipiv[j] = im - offset; } magmagpu_cswapblkcm<<< blocks, blocksize, 0, queue->cuda_stream() >>>( params ); } } else { for( k=(i1-1); k < i2; k += BLOCK_SIZE ) { magma_int_t sb = min(BLOCK_SIZE, i2-k); magmagpu_cswapblk_params_t params = { dA+k*ldda, dB, int(n), int(ldda), int(lddb), int(sb) }; for( magma_int_t j = 0; j < sb; j++ ) { im = ipiv[(k+j)*inci] - 1; if ( (k+j) == im ) params.ipiv[j] = -1; else params.ipiv[j] = im - offset; } magmagpu_cswapblkrm<<< blocks, blocksize, 0, queue->cuda_stream() >>>( params ); } } }
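For the column-major case, magmagpu_cswapblkcm runs one thread per column: within the current 64-row block, row i of A is exchanged with row ipiv[i] of B, and a pivot of -1 (set by the host wrapper when the row is already in place) is skipped. A plain host reference of that loop, using std::complex<float> as a stand-in for magmaFloatComplex (illustrative only):

#include <complex>
#include <utility>

void swapblk_cm_reference(int n, std::complex<float> *A, int ldda,
                          std::complex<float> *B, int lddb,
                          int npivots, const short *ipiv) {
    for (int col = 0; col < n; ++col) {             // one GPU thread per column
        std::complex<float> *a = A + (size_t)col * ldda - 1;
        std::complex<float> *b = B + (size_t)col * lddb;
        for (int i = 0; i < npivots; ++i) {
            ++a;                                    // element (i, col) of the block
            if (ipiv[i] == -1) continue;            // row already in place
            std::swap(*a, b[ipiv[i]]);              // exchange with row ipiv[i] of B
        }
    }
}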
56ea16f8f3f4f96c3647cd46b4182f5a0d1341fc.hip
// !!! This is a file automatically generated by hipify!!! #include <cmath> #include <cstdio> #include <cstring> #include <string> #include <algorithm> #include <iostream> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <hip/device_functions.h> #include <hip/hip_runtime_api.h> using namespace std; typedef double ld; typedef long long LL; // namespace output { // const int OutputBufferSize = 1e6+5; // char buffer[OutputBufferSize]; // char *s = buffer; // inline void flush() { // fwrite(buffer, 1, s-buffer, stdout); // s = buffer; // fflush(stdout); // } // inline void print(const char ch) { // // putchar(ch); return; // if (s-buffer>OutputBufferSize-2) flush(); // *s++ = ch; // } // inline void print(char *str) { // while (*str!=0) print(char(*str++)); // } // inline void print(int x) { // // printf("%d", x); return; // char buf[25] = {0}, *p = buf; // if (x<0) print('-'), x=-x; // if (x == 0) print('0'); // while (x) *(++p) = x%10, x/=10; // while (p != buf) print(char(*(p--)+'0')); // } // inline void print(ld x) { // // printf("%.2f", x); // static char buf[100]; // sprintf(buf, "%.2f", x); // print(buf); // } // } // struct ios { // static const int IN_LEN=1<<18|1; // char buf[IN_LEN],*s,*t; // inline char read(){ // return (s==t)&&(t=(s=buf)+fread(buf,1,IN_LEN,stdin)),s==t?-1:*s++; // } // inline bool isEOF() { // return (s==t)&&(t=(s=buf)+fread(buf,1,IN_LEN,stdin)),s==t; // } // inline ios & operator >> (int &x){ // static char c11,boo; // for(c11=read(),boo=0;!isdigit(c11);c11=read()){ // if(c11==-1)return *this; // boo|=c11=='-'; // } // for(x=0;isdigit(c11);c11=read())x=x*10+(c11^'0'); // boo&&(x=-x); // return *this; // } // inline ios & operator >> (LL &x){ // static char c11,boo; // for(c11=read(),boo=0;!isdigit(c11);c11=read()){ // if(c11==-1)return *this; // boo|=c11=='-'; // } // for(x=0;isdigit(c11);c11=read())x=x*10+(c11^'0'); // boo&&(x=-x); // return *this; // } // inline ios &operator >> (char *s) { // int len = 0; // char ch; // for (ch=read(); ch=='\n' || ch == ' '; ch=read()); // if (ch == -1) { // s[len] = 0; // return *this; // } // for (; ch!='\n' && ch != ' ' && ch != -1;ch=read()) // s[len++] = ch; // s[len] = 0; // return *this; // } // inline ios &operator>>(ld &x) // { // char ch; // bool neg = false, dec = false; // double now = 0.1; // for (ch=read(); !isdigit(ch) && (ch!='.' && ch!='-') && ch!=-1; ch=read()); // if (ch == '-') neg = true; // else if (ch == '.') { x = 0; dec = true; } // else if (ch != -1) x = ch-'0'; // else return *this; // if (!dec) { // for (ch=read(); isdigit(ch) && ch!=-1; ch=read()) { // x = x * 10 + ch-'0'; // } // } // if (ch == '.') // for (ch=read(); isdigit(ch) && ch!=-1; ch=read()) { // x += now * (ch - '0'); now *= 0.1; // } // if (neg) x = -x; // return *this; // } // inline ios &operator>>(long double &x) // { // char ch; // bool neg = false, dec = false; // double now = 0.1; // for (ch=read(); !isdigit(ch) && (ch!='.' 
&& ch!='-') && ch!=-1; ch=read()); // if (ch == '-') neg = true; // else if (ch == '.') { x = 0; dec = true; } // else if (ch != -1) x = ch-'0'; // else return *this; // if (!dec) { // for (ch=read(); isdigit(ch) && ch!=-1; ch=read()) { // x = x * 10 + ch-'0'; // } // } // if (ch == '.') // for (ch=read(); isdigit(ch) && ch!=-1; ch=read()) { // x += now * (ch - '0'); now *= 0.1; // } // if (neg) x = -x; // return *this; // } // } io; inline void handleCudaError(hipError_t err, string name = "fuck") { if (err != hipSuccess) { cerr << name << endl; cerr << hipGetErrorString(err) << endl; exit(0); } } ld *d_a, *d_b, *d_c, *h_a, *h_b, *h_c; int an, am, bn, bm; int n, m; void copyMatrix(ld *&src, ld *&dst, int n, int m) { int size = sizeof(ld) * n * m; handleCudaError(hipMalloc(&dst, size), "hipMalloc in copyMatrix"); handleCudaError(hipMemcpy(dst, src, size, hipMemcpyHostToDevice), "memcpy in copyMatrix"); // handleCudaError(hipMemcpy(src, dst, size, hipMemcpyDeviceToHost), "check in copyMatrix"); // cerr << "end in copyMatrix" << endl; } // ld *copyMatrixBack(const ld *src, int n, int m) { // ld *res; // int size = sizeof(ld) * n * m; // res = (ld*)malloc(size); // cerr << "in copyMatrixBack: size=" << size << endl; // handleCudaError(hipMemcpy(res, src, size, hipMemcpyDeviceToHost), "memcpy in copyMatrixBack"); // // memcpy(res.a, ptr, size);) // return res; // } template<typename T> __global__ void matrixMult(T *d_a, T *d_b, T *d_c, int an, int bm, int am) { int i = blockDim.x * blockIdx.x + threadIdx.x, j = blockDim.y * blockIdx.y + threadIdx.y; if (i >= an || j >= bm) return; ld sum = 0; if (i < an && j < bm) { for (int k=0; k<am; ++k) sum += d_a[i * am + k] * d_b[k * bm + j]; } if (i * bm + j < an * bm) d_c[i * bm + j] = sum; // int index = threadIdx.x; // if (index < an * bm) // d_c[index] = 1; } void outputMatrix(ld *a, int n, int m) { // output::print(n); output::print(','); // output::print(m); output::print('\n'); for (int i=0; i<n; ++i) { int base = i * m; // output::print(a[base]); printf("%.2f", a[base]); for (int j=1; j<m; ++j) { // output::print(','); // output::print(a[base + j]); printf(",%.2f", a[base+j]); } // output::print('\n'); putchar('\n'); } } int main() { #ifndef Weaverzhu freopen("input.txt", "r", stdin); freopen("output.txt", "w", stdout); #endif // io >> an >> am; scanf("%d,%d", &an, &am); // printf("%d %d\n", an, am); exit(0); h_a = (ld*)malloc(sizeof(ld) * an * am); for (int i=0; i<an; ++i) { int base = i * am; scanf("%lf", &h_a[base]); for (int j=1; j<am; ++j) scanf(",%lf", &h_a[base+j]); } scanf("%d,%d", &bn, &bm); // printf("%d %d\n", bn, bm); exit(0); h_b = (ld*)malloc(sizeof(ld) * bn * bm); for (int i=0; i<bn; ++i) { int base = i * bm; scanf("%lf", &h_b[base]); for (int j=1; j<bm; ++j) scanf(",%lf", &h_b[base+j]); } // B.readtrans(); // outputMatrix(h_a, an, am); // outputMatrix(h_b, bn, bm); // exit(0); int block_size = 16; dim3 threads(block_size, block_size); dim3 grid((an + threads.x - 1) / threads.x, (bm + threads.y - 1) / threads.y); n = an; m = bm; // fprintf(stderr, "grid= %d,%d,%d threads= %d,%d,%d\n", grid.x, grid.y, grid.z, threads.x, threads.y, threads.z); // read into main memory copyMatrix(h_a, d_a, an, am); copyMatrix(h_b, d_b, bn, bm); handleCudaError(hipMalloc(&d_c, sizeof(ld) * n * m), "allocate for h_c"); // // puts("entering danger"); // matrixMult<<<threads, grid>>>(d_a, d_b, d_c, an, bm, am); // // if (hipGetLastError() != hipSuccess) { // // cerr << "failed in matrixMult" << endl; // // exit(0); // // } else cerr << "looks good in 
matrixMult" << endl; // // puts("FUCK"); // // ld *c = copyMatrixBack(d_c, n, m); // h_c = (ld*)malloc(sizeof(ld) * n * m); // int size = sizeof(ld) * n * m; // handleCudaError(hipMemcpy(h_c, d_c, size, hipMemcpyDeviceToHost), "memcpy back"); // outputMatrix(h_c, n, m); // output::flush(); return 0; }
56ea16f8f3f4f96c3647cd46b4182f5a0d1341fc.cu
#include <cmath> #include <cstdio> #include <cstring> #include <string> #include <algorithm> #include <iostream> #include <cuda.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <device_functions.h> #include <cuda_runtime_api.h> using namespace std; typedef double ld; typedef long long LL; // namespace output { // const int OutputBufferSize = 1e6+5; // char buffer[OutputBufferSize]; // char *s = buffer; // inline void flush() { // fwrite(buffer, 1, s-buffer, stdout); // s = buffer; // fflush(stdout); // } // inline void print(const char ch) { // // putchar(ch); return; // if (s-buffer>OutputBufferSize-2) flush(); // *s++ = ch; // } // inline void print(char *str) { // while (*str!=0) print(char(*str++)); // } // inline void print(int x) { // // printf("%d", x); return; // char buf[25] = {0}, *p = buf; // if (x<0) print('-'), x=-x; // if (x == 0) print('0'); // while (x) *(++p) = x%10, x/=10; // while (p != buf) print(char(*(p--)+'0')); // } // inline void print(ld x) { // // printf("%.2f", x); // static char buf[100]; // sprintf(buf, "%.2f", x); // print(buf); // } // } // struct ios { // static const int IN_LEN=1<<18|1; // char buf[IN_LEN],*s,*t; // inline char read(){ // return (s==t)&&(t=(s=buf)+fread(buf,1,IN_LEN,stdin)),s==t?-1:*s++; // } // inline bool isEOF() { // return (s==t)&&(t=(s=buf)+fread(buf,1,IN_LEN,stdin)),s==t; // } // inline ios & operator >> (int &x){ // static char c11,boo; // for(c11=read(),boo=0;!isdigit(c11);c11=read()){ // if(c11==-1)return *this; // boo|=c11=='-'; // } // for(x=0;isdigit(c11);c11=read())x=x*10+(c11^'0'); // boo&&(x=-x); // return *this; // } // inline ios & operator >> (LL &x){ // static char c11,boo; // for(c11=read(),boo=0;!isdigit(c11);c11=read()){ // if(c11==-1)return *this; // boo|=c11=='-'; // } // for(x=0;isdigit(c11);c11=read())x=x*10+(c11^'0'); // boo&&(x=-x); // return *this; // } // inline ios &operator >> (char *s) { // int len = 0; // char ch; // for (ch=read(); ch=='\n' || ch == ' '; ch=read()); // if (ch == -1) { // s[len] = 0; // return *this; // } // for (; ch!='\n' && ch != ' ' && ch != -1;ch=read()) // s[len++] = ch; // s[len] = 0; // return *this; // } // inline ios &operator>>(ld &x) // { // char ch; // bool neg = false, dec = false; // double now = 0.1; // for (ch=read(); !isdigit(ch) && (ch!='.' && ch!='-') && ch!=-1; ch=read()); // if (ch == '-') neg = true; // else if (ch == '.') { x = 0; dec = true; } // else if (ch != -1) x = ch-'0'; // else return *this; // if (!dec) { // for (ch=read(); isdigit(ch) && ch!=-1; ch=read()) { // x = x * 10 + ch-'0'; // } // } // if (ch == '.') // for (ch=read(); isdigit(ch) && ch!=-1; ch=read()) { // x += now * (ch - '0'); now *= 0.1; // } // if (neg) x = -x; // return *this; // } // inline ios &operator>>(long double &x) // { // char ch; // bool neg = false, dec = false; // double now = 0.1; // for (ch=read(); !isdigit(ch) && (ch!='.' 
&& ch!='-') && ch!=-1; ch=read()); // if (ch == '-') neg = true; // else if (ch == '.') { x = 0; dec = true; } // else if (ch != -1) x = ch-'0'; // else return *this; // if (!dec) { // for (ch=read(); isdigit(ch) && ch!=-1; ch=read()) { // x = x * 10 + ch-'0'; // } // } // if (ch == '.') // for (ch=read(); isdigit(ch) && ch!=-1; ch=read()) { // x += now * (ch - '0'); now *= 0.1; // } // if (neg) x = -x; // return *this; // } // } io; inline void handleCudaError(cudaError_t err, string name = "fuck") { if (err != cudaSuccess) { cerr << name << endl; cerr << cudaGetErrorString(err) << endl; exit(0); } } ld *d_a, *d_b, *d_c, *h_a, *h_b, *h_c; int an, am, bn, bm; int n, m; void copyMatrix(ld *&src, ld *&dst, int n, int m) { int size = sizeof(ld) * n * m; handleCudaError(cudaMalloc(&dst, size), "cudaMalloc in copyMatrix"); handleCudaError(cudaMemcpy(dst, src, size, cudaMemcpyHostToDevice), "memcpy in copyMatrix"); // handleCudaError(cudaMemcpy(src, dst, size, cudaMemcpyDeviceToHost), "check in copyMatrix"); // cerr << "end in copyMatrix" << endl; } // ld *copyMatrixBack(const ld *src, int n, int m) { // ld *res; // int size = sizeof(ld) * n * m; // res = (ld*)malloc(size); // cerr << "in copyMatrixBack: size=" << size << endl; // handleCudaError(cudaMemcpy(res, src, size, cudaMemcpyDeviceToHost), "memcpy in copyMatrixBack"); // // memcpy(res.a, ptr, size);) // return res; // } template<typename T> __global__ void matrixMult(T *d_a, T *d_b, T *d_c, int an, int bm, int am) { int i = blockDim.x * blockIdx.x + threadIdx.x, j = blockDim.y * blockIdx.y + threadIdx.y; if (i >= an || j >= bm) return; ld sum = 0; if (i < an && j < bm) { for (int k=0; k<am; ++k) sum += d_a[i * am + k] * d_b[k * bm + j]; } if (i * bm + j < an * bm) d_c[i * bm + j] = sum; // int index = threadIdx.x; // if (index < an * bm) // d_c[index] = 1; } void outputMatrix(ld *a, int n, int m) { // output::print(n); output::print(','); // output::print(m); output::print('\n'); for (int i=0; i<n; ++i) { int base = i * m; // output::print(a[base]); printf("%.2f", a[base]); for (int j=1; j<m; ++j) { // output::print(','); // output::print(a[base + j]); printf(",%.2f", a[base+j]); } // output::print('\n'); putchar('\n'); } } int main() { #ifndef Weaverzhu freopen("input.txt", "r", stdin); freopen("output.txt", "w", stdout); #endif // io >> an >> am; scanf("%d,%d", &an, &am); // printf("%d %d\n", an, am); exit(0); h_a = (ld*)malloc(sizeof(ld) * an * am); for (int i=0; i<an; ++i) { int base = i * am; scanf("%lf", &h_a[base]); for (int j=1; j<am; ++j) scanf(",%lf", &h_a[base+j]); } scanf("%d,%d", &bn, &bm); // printf("%d %d\n", bn, bm); exit(0); h_b = (ld*)malloc(sizeof(ld) * bn * bm); for (int i=0; i<bn; ++i) { int base = i * bm; scanf("%lf", &h_b[base]); for (int j=1; j<bm; ++j) scanf(",%lf", &h_b[base+j]); } // B.readtrans(); // outputMatrix(h_a, an, am); // outputMatrix(h_b, bn, bm); // exit(0); int block_size = 16; dim3 threads(block_size, block_size); dim3 grid((an + threads.x - 1) / threads.x, (bm + threads.y - 1) / threads.y); n = an; m = bm; // fprintf(stderr, "grid= %d,%d,%d threads= %d,%d,%d\n", grid.x, grid.y, grid.z, threads.x, threads.y, threads.z); // read into main memory copyMatrix(h_a, d_a, an, am); copyMatrix(h_b, d_b, bn, bm); handleCudaError(cudaMalloc(&d_c, sizeof(ld) * n * m), "allocate for h_c"); // // puts("entering danger"); // matrixMult<<<threads, grid>>>(d_a, d_b, d_c, an, bm, am); // // if (cudaGetLastError() != cudaSuccess) { // // cerr << "failed in matrixMult" << endl; // // exit(0); // // } else cerr << 
"looks good in matrixMult" << endl; // // puts("FUCK"); // // ld *c = copyMatrixBack(d_c, n, m); // h_c = (ld*)malloc(sizeof(ld) * n * m); // int size = sizeof(ld) * n * m; // handleCudaError(cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost), "memcpy back"); // outputMatrix(h_c, n, m); // output::flush(); return 0; }
9e0c2d68e1e75111554346ac40e6f419117da088.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #if GOOGLE_CUDA #define EIGEN_USE_GPU #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" #include "float.h" #define INNER_BATCH_SIZE 4 #define BATCH 512 __global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){ __shared__ float buf[BATCH*3]; for (int i=blockIdx.x;i<b;i+=gridDim.x){ for (int k2=0;k2<m;k2+=BATCH){ int end_k=min(m,k2+BATCH)-k2; for (int j=threadIdx.x;j<end_k*3;j+=blockDim.x){ buf[j]=xyz2[(i*m+k2)*3+j]; } __syncthreads(); for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){ float x1=xyz[(i*n+j)*3+0]; float y1=xyz[(i*n+j)*3+1]; float z1=xyz[(i*n+j)*3+2]; int best_i=0; float best=FLT_MAX; int end_ka=end_k-(end_k & (INNER_BATCH_SIZE - 1)); for (int k=0;k<end_ka;k+=INNER_BATCH_SIZE){ #pragma unroll for (int u=0; u < INNER_BATCH_SIZE; u++) { float x2=buf[k*3+u*3]-x1; float y2=buf[k*3+u*3+1]-y1; float z2=buf[k*3+u*3+2]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+u; } } } for (int k=end_ka;k<end_k;k++){ float x2=buf[k*3+0]-x1; float y2=buf[k*3+1]-y1; float z2=buf[k*3+2]-z1; float d=x2*x2+y2*y2+z2*z2; if (k==0 || d<best){ best=d; best_i=k+k2; } } if (k2==0 || result[(i*n+j)]>best){ result[(i*n+j)]=best; result_i[(i*n+j)]=best_i; } } __syncthreads(); } } } void NmDistanceKernelLauncher(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i){ hipLaunchKernelGGL(( NmDistanceKernel), dim3(dim3(4,128,1)),dim3(512), 0, 0, b,n,xyz,m,xyz2,result,result_i); hipLaunchKernelGGL(( NmDistanceKernel), dim3(dim3(4,128,1)),dim3(512), 0, 0, b,m,xyz2,n,xyz,result2,result2_i); } __global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){ for (int i=blockIdx.x;i<b;i+=gridDim.x){ for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){ float x1=xyz1[(i*n+j)*3+0]; float y1=xyz1[(i*n+j)*3+1]; float z1=xyz1[(i*n+j)*3+2]; int j2=idx1[i*n+j]; float x2=xyz2[(i*m+j2)*3+0]; float y2=xyz2[(i*m+j2)*3+1]; float z2=xyz2[(i*m+j2)*3+2]; float g=grad_dist1[i*n+j]*2; atomicAdd(&(grad_xyz1[(i*n+j)*3+0]),g*(x1-x2)); atomicAdd(&(grad_xyz1[(i*n+j)*3+1]),g*(y1-y2)); atomicAdd(&(grad_xyz1[(i*n+j)*3+2]),g*(z1-z2)); atomicAdd(&(grad_xyz2[(i*m+j2)*3+0]),-(g*(x1-x2))); atomicAdd(&(grad_xyz2[(i*m+j2)*3+1]),-(g*(y1-y2))); atomicAdd(&(grad_xyz2[(i*m+j2)*3+2]),-(g*(z1-z2))); } } } void NmDistanceGradKernelLauncher(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,const float * grad_dist2,const int * idx2,float * grad_xyz1,float * grad_xyz2){ hipMemset(grad_xyz1,0,b*n*3*4); hipMemset(grad_xyz2,0,b*m*3*4); hipLaunchKernelGGL(( NmDistanceGradKernel), dim3(dim3(1,64,1)),dim3(512), 0, 0, b,n,xyz1,m,xyz2,grad_dist1,idx1,grad_xyz1,grad_xyz2); hipLaunchKernelGGL(( NmDistanceGradKernel), dim3(dim3(1,64,1)),dim3(512), 0, 0, b,m,xyz2,n,xyz1,grad_dist2,idx2,grad_xyz2,grad_xyz1); } #endif
9e0c2d68e1e75111554346ac40e6f419117da088.cu
#if GOOGLE_CUDA
#define EIGEN_USE_GPU
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "float.h"

#define INNER_BATCH_SIZE 4
#define BATCH 512

__global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){
    __shared__ float buf[BATCH*3];
    for (int i=blockIdx.x;i<b;i+=gridDim.x){
        for (int k2=0;k2<m;k2+=BATCH){
            int end_k=min(m,k2+BATCH)-k2;
            for (int j=threadIdx.x;j<end_k*3;j+=blockDim.x){
                buf[j]=xyz2[(i*m+k2)*3+j];
            }
            __syncthreads();
            for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
                float x1=xyz[(i*n+j)*3+0];
                float y1=xyz[(i*n+j)*3+1];
                float z1=xyz[(i*n+j)*3+2];
                int best_i=0;
                float best=FLT_MAX;
                int end_ka=end_k-(end_k & (INNER_BATCH_SIZE - 1));
                for (int k=0;k<end_ka;k+=INNER_BATCH_SIZE){
                    #pragma unroll
                    for (int u=0; u < INNER_BATCH_SIZE; u++) {
                        float x2=buf[k*3+u*3]-x1;
                        float y2=buf[k*3+u*3+1]-y1;
                        float z2=buf[k*3+u*3+2]-z1;
                        float d=x2*x2+y2*y2+z2*z2;
                        if (d<best){
                            best=d;
                            best_i=k+k2+u;
                        }
                    }
                }
                for (int k=end_ka;k<end_k;k++){
                    float x2=buf[k*3+0]-x1;
                    float y2=buf[k*3+1]-y1;
                    float z2=buf[k*3+2]-z1;
                    float d=x2*x2+y2*y2+z2*z2;
                    if (k==0 || d<best){
                        best=d;
                        best_i=k+k2;
                    }
                }
                if (k2==0 || result[(i*n+j)]>best){
                    result[(i*n+j)]=best;
                    result_i[(i*n+j)]=best_i;
                }
            }
            __syncthreads();
        }
    }
}
void NmDistanceKernelLauncher(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i){
    NmDistanceKernel<<<dim3(4,128,1),512>>>(b,n,xyz,m,xyz2,result,result_i);
    NmDistanceKernel<<<dim3(4,128,1),512>>>(b,m,xyz2,n,xyz,result2,result2_i);
}
__global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){
    for (int i=blockIdx.x;i<b;i+=gridDim.x){
        for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
            float x1=xyz1[(i*n+j)*3+0];
            float y1=xyz1[(i*n+j)*3+1];
            float z1=xyz1[(i*n+j)*3+2];
            int j2=idx1[i*n+j];
            float x2=xyz2[(i*m+j2)*3+0];
            float y2=xyz2[(i*m+j2)*3+1];
            float z2=xyz2[(i*m+j2)*3+2];
            float g=grad_dist1[i*n+j]*2;
            atomicAdd(&(grad_xyz1[(i*n+j)*3+0]),g*(x1-x2));
            atomicAdd(&(grad_xyz1[(i*n+j)*3+1]),g*(y1-y2));
            atomicAdd(&(grad_xyz1[(i*n+j)*3+2]),g*(z1-z2));
            atomicAdd(&(grad_xyz2[(i*m+j2)*3+0]),-(g*(x1-x2)));
            atomicAdd(&(grad_xyz2[(i*m+j2)*3+1]),-(g*(y1-y2)));
            atomicAdd(&(grad_xyz2[(i*m+j2)*3+2]),-(g*(z1-z2)));
        }
    }
}
void NmDistanceGradKernelLauncher(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,const float * grad_dist2,const int * idx2,float * grad_xyz1,float * grad_xyz2){
    cudaMemset(grad_xyz1,0,b*n*3*4);
    cudaMemset(grad_xyz2,0,b*m*3*4);
    NmDistanceGradKernel<<<dim3(1,64,1),512>>>(b,n,xyz1,m,xyz2,grad_dist1,idx1,grad_xyz1,grad_xyz2);
    NmDistanceGradKernel<<<dim3(1,64,1),512>>>(b,m,xyz2,n,xyz1,grad_dist2,idx2,grad_xyz2,grad_xyz1);
}

#endif
9592d94f9558727c7715f4b568848bd3af8e907c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <malloc.h> using namespace std; __global__ void add(int* d_a, int* d_b, int* d_c, int* d_limit){ int tid = threadIdx.x + blockIdx.x*blockDim.x; if(tid < 1000){ d_c[tid] = d_a[tid] + d_b[tid]; } } int main(){ int size = 2000; // size of an array int ngpus = 2; /* Device memory pointer for storing array*/ int *d_a[2], *d_b[2], *d_c[2]; const int Ns[2] = {size/2, size - size/2}; /* memory allocation for limit */ int* h_limit; int* d_limit; h_limit = (int *)malloc(sizeof(int)); hipMalloc((void **)&d_limit, sizeof(int)); /* Host memory for storing array */ int h_a[size]; int h_b[size]; for(int i=0;i<size;i++){ h_a[i] = i+1; h_b[i] = i+2; } /*int* h_c[ngpus]; for(int dev=0; dev < ngpus; dev++){ h_c[dev] = (int *)malloc(Ns[dev]*sizeof(int)); }*/ int* h_c; h_c = (int *)malloc(size*sizeof(int)); /* allocate memory on gpus */ for(int dev=0; dev< ngpus ;dev++){ hipSetDevice(dev); hipMalloc((void **)&d_a[dev], Ns[dev]*sizeof(int)); hipMalloc((void **)&d_b[dev], Ns[dev]*sizeof(int)); hipMalloc((void **)&d_c[dev], Ns[dev]*sizeof(int)); } /* Copy the host array to gpus */ for(int dev=0,pos=0; dev < ngpus; pos+= Ns[dev], dev++){ hipSetDevice(dev); hipMemcpy(d_a[dev], h_a+pos, Ns[dev]*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_b[dev], h_b+pos, Ns[dev]*sizeof(int), hipMemcpyHostToDevice); } /* Compute addition */ for(int dev=0; dev< ngpus; dev++){ //h_limit[0] = Ns[dev]; hipSetDevice(dev); h_limit[0] = Ns[dev]; hipMemcpy(d_limit, h_limit, sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( add), dim3(1),dim3(Ns[dev]), 0, 0, d_a[dev],d_b[dev], d_c[dev], d_limit); /*hipMemcpy(h_c[dev], d_c[dev], Ns[dev]*sizeof(int), hipMemcpyDeviceToHost); for(int i=0;i<Ns[dev];i++){ if(i%100 == 0) cout<<h_c[dev][i]<<endl; }*/ } for(int dev=0, pos=0; dev < ngpus; pos += Ns[dev], dev++){ hipSetDevice(dev); hipMemcpy(h_c+pos, d_c[dev], Ns[dev]*sizeof(int), hipMemcpyDeviceToHost); } /* Print Part */ for(int i=0;i<size;i++){ if(i%100 == 0) cout<<"h_c["<<i<<"] = "<<h_c[i]<<endl; } }
9592d94f9558727c7715f4b568848bd3af8e907c.cu
#include <iostream>
#include <malloc.h>
using namespace std;

__global__ void add(int* d_a, int* d_b, int* d_c, int* d_limit){
    int tid = threadIdx.x + blockIdx.x*blockDim.x;
    if(tid < 1000){
        d_c[tid] = d_a[tid] + d_b[tid];
    }
}

int main(){
    int size = 2000; // size of an array
    int ngpus = 2;

    /* Device memory pointer for storing array*/
    int *d_a[2], *d_b[2], *d_c[2];

    const int Ns[2] = {size/2, size - size/2};

    /* memory allocation for limit */
    int* h_limit;
    int* d_limit;
    h_limit = (int *)malloc(sizeof(int));
    cudaMalloc((void **)&d_limit, sizeof(int));

    /* Host memory for storing array */
    int h_a[size];
    int h_b[size];

    for(int i=0;i<size;i++){
        h_a[i] = i+1;
        h_b[i] = i+2;
    }

    /*int* h_c[ngpus];
    for(int dev=0; dev < ngpus; dev++){
        h_c[dev] = (int *)malloc(Ns[dev]*sizeof(int));
    }*/

    int* h_c;
    h_c = (int *)malloc(size*sizeof(int));

    /* allocate memory on gpus */
    for(int dev=0; dev< ngpus ;dev++){
        cudaSetDevice(dev);
        cudaMalloc((void **)&d_a[dev], Ns[dev]*sizeof(int));
        cudaMalloc((void **)&d_b[dev], Ns[dev]*sizeof(int));
        cudaMalloc((void **)&d_c[dev], Ns[dev]*sizeof(int));
    }

    /* Copy the host array to gpus */
    for(int dev=0,pos=0; dev < ngpus; pos+= Ns[dev], dev++){
        cudaSetDevice(dev);
        cudaMemcpy(d_a[dev], h_a+pos, Ns[dev]*sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(d_b[dev], h_b+pos, Ns[dev]*sizeof(int), cudaMemcpyHostToDevice);
    }

    /* Compute addition */
    for(int dev=0; dev< ngpus; dev++){
        //h_limit[0] = Ns[dev];
        cudaSetDevice(dev);
        h_limit[0] = Ns[dev];
        cudaMemcpy(d_limit, h_limit, sizeof(int), cudaMemcpyHostToDevice);
        add<<<1,Ns[dev]>>>(d_a[dev],d_b[dev], d_c[dev], d_limit);
        /*cudaMemcpy(h_c[dev], d_c[dev], Ns[dev]*sizeof(int), cudaMemcpyDeviceToHost);
        for(int i=0;i<Ns[dev];i++){
            if(i%100 == 0)
                cout<<h_c[dev][i]<<endl;
        }*/
    }

    for(int dev=0, pos=0; dev < ngpus; pos += Ns[dev], dev++){
        cudaSetDevice(dev);
        cudaMemcpy(h_c+pos, d_c[dev], Ns[dev]*sizeof(int), cudaMemcpyDeviceToHost);
    }

    /* Print Part */
    for(int i=0;i<size;i++){
        if(i%100 == 0)
            cout<<"h_c["<<i<<"] = "<<h_c[i]<<endl;
    }
}
ee22a517e4a1b75a8a2dc0aa9b86699fb4e62e07.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cube.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            double *d_out = NULL;
            hipMalloc(&d_out, XSIZE*YSIZE);
            double *d_in = NULL;
            hipMalloc(&d_in, XSIZE*YSIZE);
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0)
            {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0)
            {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( cube), dim3(gridBlock),dim3(threadBlock), 0, 0, d_out,d_in);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( cube), dim3(gridBlock),dim3(threadBlock), 0, 0, d_out,d_in);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( cube), dim3(gridBlock),dim3(threadBlock), 0, 0, d_out,d_in);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
ee22a517e4a1b75a8a2dc0aa9b86699fb4e62e07.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cube.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            double *d_out = NULL;
            cudaMalloc(&d_out, XSIZE*YSIZE);
            double *d_in = NULL;
            cudaMalloc(&d_in, XSIZE*YSIZE);
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0)
            {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0)
            {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            cube<<<gridBlock,threadBlock>>>(d_out,d_in);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                cube<<<gridBlock,threadBlock>>>(d_out,d_in);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                cube<<<gridBlock,threadBlock>>>(d_out,d_in);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
0eefac81c693120342b319fd9cd5f243b102d1ac.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" __global__ void multiplication(char* M, char* N, char* P, int Width) { int tid, tx, ty; tx = blockDim.x*blockIdx.x + threadIdx.x; ty = blockDim.y*blockIdx.y + threadIdx.y; tid = Width*ty + tx; char Value = 0; char MVal = 0; char NVal = 0; for (int i = 0; i < Width; i++) { MVal = M[ty * Width + i]; NVal = N[i * Width + tx]; Value += MVal * NVal; } P[tid] = Value; }
0eefac81c693120342b319fd9cd5f243b102d1ac.cu
extern "C" __global__ void multiplication(char* M, char* N, char* P, int Width) { int tid, tx, ty; tx = blockDim.x*blockIdx.x + threadIdx.x; ty = blockDim.y*blockIdx.y + threadIdx.y; tid = Width*ty + tx; char Value = 0; char MVal = 0; char NVal = 0; for (int i = 0; i < Width; i++) { MVal = M[ty * Width + i]; NVal = N[i * Width + tx]; Value += MVal * NVal; } P[tid] = Value; }
5a46fa5ebce2c932ed1d7e9995356228b508305d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kunet.h" __global__ void _softloss32(int n, double scale, float *y, float *dy) { int i = threadIdx.x + blockIdx.x * blockDim.x; while (i < n) { dy[i] = scale*(y[i] - dy[i])/y[i]; i += blockDim.x * gridDim.x; } } __global__ void _softloss64(int n, double scale, double *y, double *dy) { int i = threadIdx.x + blockIdx.x * blockDim.x; while (i < n) { dy[i] = scale*(y[i] - dy[i])/y[i]; i += blockDim.x * gridDim.x; } } extern "C" { void softloss32(int n, double s, float *y, float *dy) KCALL(_softloss32,n,s,y,dy); void softloss64(int n, double s, double *y, double *dy) KCALL(_softloss64,n,s,y,dy); } __global__ void _logploss32(int n, double scale, float *y, float *dy) { int i = threadIdx.x + blockIdx.x * blockDim.x; while (i < n) { dy[i] = scale*(exp(y[i]) - dy[i]); i += blockDim.x * gridDim.x; } } __global__ void _logploss64(int n, double scale, double *y, double *dy) { int i = threadIdx.x + blockIdx.x * blockDim.x; while (i < n) { dy[i] = scale*(exp(y[i]) - dy[i]); i += blockDim.x * gridDim.x; } } extern "C" { void logploss32(int n, double s, float *y, float *dy) KCALL(_logploss32,n,s,y,dy); void logploss64(int n, double s, double *y, double *dy) KCALL(_logploss64,n,s,y,dy); } __global__ void _xentloss32(int nd, int nx, float *y, float *dy) { double z, ymax; // double *qz = (double *) malloc(nd * sizeof(double)); int i0, i1; int ix = threadIdx.x + blockIdx.x * blockDim.x; while (ix < nx) { i0 = ix * nd; i1 = i0 + nd; z = 0; ymax = -INFINITY; for (int i=i0; i<i1; i++) { if (y[i] > ymax) ymax = y[i]; } for (int i=i0; i<i1; i++) { y[i] = exp(y[i] - ymax); z+=y[i]; } for (int i=i0; i<i1; i++) { y[i] /= z; dy[i] = (y[i] - dy[i])/nx; } //for (int i=i0; i<i1; i++) { z += (qz[i-i0] = exp(y[i] - ymax)); } //for (int i=i0; i<i1; i++) { dy[i] = (qz[i-i0]/z - dy[i])/nx; } ix += blockDim.x * gridDim.x; } // free(qz); } __global__ void _xentloss64(int nd, int nx, double *y, double *dy) { double z, ymax; // double *qz = (double *) malloc(nd * sizeof(double)); int i0, i1; int ix = threadIdx.x + blockIdx.x * blockDim.x; while (ix < nx) { i0 = ix * nd; i1 = i0 + nd; z = 0; ymax = -INFINITY; for (int i=i0; i<i1; i++) { if (y[i] > ymax) ymax = y[i]; } for (int i=i0; i<i1; i++) { y[i] = exp(y[i] - ymax); z+=y[i]; } for (int i=i0; i<i1; i++) { y[i] /= z; dy[i] = (y[i] - dy[i])/nx; } // for (int i=i0; i<i1; i++) { z += (qz[i-i0] = exp(y[i] - ymax)); } // for (int i=i0; i<i1; i++) { dy[i] = (qz[i-i0]/z - dy[i])/nx; } ix += blockDim.x * gridDim.x; } // free(qz); } extern "C" { void xentloss32(int nd, int nx, float *y, float *dy) KCALL(_xentloss32,nd,nx,y,dy); void xentloss64(int nd, int nx, double *y, double *dy) KCALL(_xentloss64,nd,nx,y,dy); } __global__ void _percloss32(int nd, int nx, float *y, float *z) { float ymax, zmax; int i0, i1, cy, cz; int ix = threadIdx.x + blockIdx.x * blockDim.x; while (ix < nx) { ymax = -INFINITY; cy = 0; zmax = -INFINITY; cz = 0; i0 = ix * nd; i1 = i0 + nd; for (int i=i0; i<i1; i++) { if (y[i] > ymax) { ymax = y[i]; cy = i; } if (z[i] > zmax) { zmax = z[i]; cz = i; } z[i] = 0; } if (cz != cy) { z[cz] = -1; z[cy] = 1; } ix += blockDim.x * gridDim.x; } } __global__ void _percloss64(int nd, int nx, double *y, double *z) { double ymax, zmax; int i0, i1, cy, cz; int ix = threadIdx.x + blockIdx.x * blockDim.x; while (ix < nx) { i0 = ix * nd; i1 = i0 + nd; ymax = -INFINITY; cy = 0; zmax = -INFINITY; cz = 0; for (int i=i0; i<i1; i++) { if (y[i] > ymax) { ymax = y[i]; cy = i; } if (z[i] > zmax) { 
zmax = z[i]; cz = i; } z[i] = 0; } if (cz != cy) { z[cz] = -1; z[cy] = 1; } ix += blockDim.x * gridDim.x; } } extern "C" { void percloss32(int nd, int nx, float *y, float *z) KCALL(_percloss32,nd,nx,y,z); void percloss64(int nd, int nx, double *y, double *z) KCALL(_percloss64,nd,nx,y,z); }
5a46fa5ebce2c932ed1d7e9995356228b508305d.cu
#include "kunet.h" __global__ void _softloss32(int n, double scale, float *y, float *dy) { int i = threadIdx.x + blockIdx.x * blockDim.x; while (i < n) { dy[i] = scale*(y[i] - dy[i])/y[i]; i += blockDim.x * gridDim.x; } } __global__ void _softloss64(int n, double scale, double *y, double *dy) { int i = threadIdx.x + blockIdx.x * blockDim.x; while (i < n) { dy[i] = scale*(y[i] - dy[i])/y[i]; i += blockDim.x * gridDim.x; } } extern "C" { void softloss32(int n, double s, float *y, float *dy) KCALL(_softloss32,n,s,y,dy); void softloss64(int n, double s, double *y, double *dy) KCALL(_softloss64,n,s,y,dy); } __global__ void _logploss32(int n, double scale, float *y, float *dy) { int i = threadIdx.x + blockIdx.x * blockDim.x; while (i < n) { dy[i] = scale*(exp(y[i]) - dy[i]); i += blockDim.x * gridDim.x; } } __global__ void _logploss64(int n, double scale, double *y, double *dy) { int i = threadIdx.x + blockIdx.x * blockDim.x; while (i < n) { dy[i] = scale*(exp(y[i]) - dy[i]); i += blockDim.x * gridDim.x; } } extern "C" { void logploss32(int n, double s, float *y, float *dy) KCALL(_logploss32,n,s,y,dy); void logploss64(int n, double s, double *y, double *dy) KCALL(_logploss64,n,s,y,dy); } __global__ void _xentloss32(int nd, int nx, float *y, float *dy) { double z, ymax; // double *qz = (double *) malloc(nd * sizeof(double)); int i0, i1; int ix = threadIdx.x + blockIdx.x * blockDim.x; while (ix < nx) { i0 = ix * nd; i1 = i0 + nd; z = 0; ymax = -INFINITY; for (int i=i0; i<i1; i++) { if (y[i] > ymax) ymax = y[i]; } for (int i=i0; i<i1; i++) { y[i] = exp(y[i] - ymax); z+=y[i]; } for (int i=i0; i<i1; i++) { y[i] /= z; dy[i] = (y[i] - dy[i])/nx; } //for (int i=i0; i<i1; i++) { z += (qz[i-i0] = exp(y[i] - ymax)); } //for (int i=i0; i<i1; i++) { dy[i] = (qz[i-i0]/z - dy[i])/nx; } ix += blockDim.x * gridDim.x; } // free(qz); } __global__ void _xentloss64(int nd, int nx, double *y, double *dy) { double z, ymax; // double *qz = (double *) malloc(nd * sizeof(double)); int i0, i1; int ix = threadIdx.x + blockIdx.x * blockDim.x; while (ix < nx) { i0 = ix * nd; i1 = i0 + nd; z = 0; ymax = -INFINITY; for (int i=i0; i<i1; i++) { if (y[i] > ymax) ymax = y[i]; } for (int i=i0; i<i1; i++) { y[i] = exp(y[i] - ymax); z+=y[i]; } for (int i=i0; i<i1; i++) { y[i] /= z; dy[i] = (y[i] - dy[i])/nx; } // for (int i=i0; i<i1; i++) { z += (qz[i-i0] = exp(y[i] - ymax)); } // for (int i=i0; i<i1; i++) { dy[i] = (qz[i-i0]/z - dy[i])/nx; } ix += blockDim.x * gridDim.x; } // free(qz); } extern "C" { void xentloss32(int nd, int nx, float *y, float *dy) KCALL(_xentloss32,nd,nx,y,dy); void xentloss64(int nd, int nx, double *y, double *dy) KCALL(_xentloss64,nd,nx,y,dy); } __global__ void _percloss32(int nd, int nx, float *y, float *z) { float ymax, zmax; int i0, i1, cy, cz; int ix = threadIdx.x + blockIdx.x * blockDim.x; while (ix < nx) { ymax = -INFINITY; cy = 0; zmax = -INFINITY; cz = 0; i0 = ix * nd; i1 = i0 + nd; for (int i=i0; i<i1; i++) { if (y[i] > ymax) { ymax = y[i]; cy = i; } if (z[i] > zmax) { zmax = z[i]; cz = i; } z[i] = 0; } if (cz != cy) { z[cz] = -1; z[cy] = 1; } ix += blockDim.x * gridDim.x; } } __global__ void _percloss64(int nd, int nx, double *y, double *z) { double ymax, zmax; int i0, i1, cy, cz; int ix = threadIdx.x + blockIdx.x * blockDim.x; while (ix < nx) { i0 = ix * nd; i1 = i0 + nd; ymax = -INFINITY; cy = 0; zmax = -INFINITY; cz = 0; for (int i=i0; i<i1; i++) { if (y[i] > ymax) { ymax = y[i]; cy = i; } if (z[i] > zmax) { zmax = z[i]; cz = i; } z[i] = 0; } if (cz != cy) { z[cz] = -1; z[cy] = 1; } ix += 
blockDim.x * gridDim.x; } } extern "C" { void percloss32(int nd, int nx, float *y, float *z) KCALL(_percloss32,nd,nx,y,z); void percloss64(int nd, int nx, double *y, double *z) KCALL(_percloss64,nd,nx,y,z); }
120a8d6d0a5189b9146f0c61ff9621d8b8e8b84d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cu_inc/common.cuh" #include "rnn_impl.h" namespace onnxruntime { namespace cuda { template <typename T> __global__ void _ReverseBySequenceKernel(const int32_t seq_length, const int32_t block_size, const fast_divmod div_batch_block, const T* data, T* reversed_data, const CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); int seq_id, offset; div_batch_block.divmod(id, seq_id, offset); int org_id = (seq_length - seq_id - 1) * block_size + offset; reversed_data[id] = data[org_id]; } template <typename T> void ReverseBySequence(const int32_t seq_length, const int32_t batch_size, const int32_t input_or_hidden_size, const T* data, T* reversed_data, const size_t N) { // kerneral int32_t block_size = batch_size * input_or_hidden_size; fast_divmod div_batch_block(block_size); int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock)); hipLaunchKernelGGL(( _ReverseBySequenceKernel<T>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0, seq_length, block_size, div_batch_block, data, reversed_data, (CUDA_LONG)N); } template <typename T> __global__ void _BidirectionalDataKernel(const int32_t seq_length, const int32_t batch_size, const int32_t hidden_size, const int32_t seq_block_size, const fast_divmod div_seq_block, const fast_divmod div_output_block, const T* data, T* reordered_data, const CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); int seq_id, seq_offset, output_id, offset; div_seq_block.divmod(id, seq_id, seq_offset); div_output_block.divmod(seq_offset, output_id, offset); int org_output_id = 0; if (output_id < batch_size) { org_output_id = 2 * output_id; } else { org_output_id = (output_id - batch_size) * 2 + 1; } int org_id = seq_id * seq_block_size + org_output_id * hidden_size + offset; reordered_data[id] = data[org_id]; } template <typename T> void ReorderBidirectionalDataInSequence(const int32_t seq_length, const int32_t batch_size, const int32_t hidden_size, const T* data, T* reordered_data, const size_t N) { // The cudnn Y output is organize like [Y1, YB1] [Y2, YB2] ... // need to reorganize it to [Y1, Y2, ...] [YB1, YB2, ...] 
int32_t seq_block_size = 2 * batch_size * hidden_size; fast_divmod div_seq_block(seq_block_size); fast_divmod div_output_block(hidden_size); int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock)); hipLaunchKernelGGL(( _BidirectionalDataKernel<T>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0, seq_length, batch_size, hidden_size, seq_block_size, div_seq_block, div_output_block, data, reordered_data, (CUDA_LONG)N); } template <typename T> __global__ void _RnnMaskKernel(const int32_t seq_length, const int32_t batch_size, const int32_t hidden_size, const int32_t* sequence_lens, const fast_divmod div_seq_block, const fast_divmod div_dir_block, const fast_divmod div_batch_block, T* y_output_data, T* y_h_output_data, const CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); int seq_id, direction_id, batch_id, offset; div_seq_block.divmod(id, seq_id, offset); div_dir_block.divmod(offset, direction_id, offset); div_batch_block.divmod(offset, batch_id, offset); int32_t batch_seq_length = sequence_lens[batch_id]; if (batch_id >= batch_size || batch_seq_length == seq_length) { return; } if (seq_id >= batch_seq_length) { y_output_data[id] = 0; return; } if ((y_h_output_data != nullptr) && ((direction_id == 0 && (seq_id + 1) == batch_seq_length) || (direction_id == 1 && seq_id == 0))) { int hy_idx = direction_id * batch_size * hidden_size + batch_id * hidden_size + offset; y_h_output_data[hy_idx] = y_output_data[id]; } } template <typename T> void RnnMaskImpl(const int32_t num_directions, const int32_t seq_length, const int32_t batch_size, const int32_t hidden_size, const int32_t* sequence_lens, T* y_output_data, T* y_h_output_data, const size_t N) { fast_divmod div_seq_block(batch_size * hidden_size * num_directions); fast_divmod div_dir_block(batch_size * hidden_size); fast_divmod div_batch_block(hidden_size); int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock)); hipLaunchKernelGGL(( _RnnMaskKernel<T>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0, seq_length, batch_size, hidden_size, sequence_lens, div_seq_block, div_dir_block, div_batch_block, y_output_data, y_h_output_data, (CUDA_LONG)N); } template <typename T> __global__ void _MaskZeroSequences(const int32_t hidden_size, T* y_output_data, T* y_h_output_data, T* y_c_output_data, const int32_t* zeor_seq_index_cache, const CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); int32_t zero_seq_offset = zeor_seq_index_cache[id] * hidden_size; if (y_output_data != nullptr) { for (int i = 0; i < hidden_size; ++i) { y_output_data[zero_seq_offset + i] = 0; } } if (y_h_output_data != nullptr) { for (int i = 0; i < hidden_size; ++i) { y_h_output_data[zero_seq_offset + i] = 0; } } if (y_c_output_data != nullptr) { for (int i = 0; i < hidden_size; ++i) { y_c_output_data[zero_seq_offset + i] = 0; } } } template <typename T> void MaskZeroSequences(const int32_t hidden_size, T* y_output_data, T* y_h_output_data, T* y_c_output_data, const int32_t* zeor_seq_index_cache, const size_t N) { int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock)); hipLaunchKernelGGL(( _MaskZeroSequences<T>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0, hidden_size, y_output_data, y_h_output_data, y_c_output_data, zeor_seq_index_cache, (CUDA_LONG)N); } #define SPECIALIZED_RNN_IMPL(T) \ template void RnnMaskImpl<T>(const int32_t num_directions, \ const int32_t seq_length, \ const int32_t batch_size, \ const int32_t hidden_size, \ const 
int32_t* sequence_lens, \ T* y_output_data, \ T* y_h_output_data, \ const size_t N); \ template void ReverseBySequence<T>(const int32_t seq_length, \ const int32_t batch_size, \ const int32_t hidden_size, \ const T* data, \ T* reversed_data, \ const size_t N); \ template void ReorderBidirectionalDataInSequence<T>(const int32_t seq_length, \ const int32_t batch_size, \ const int32_t hidden_size,\ const T* data, \ T* reordered_data, \ const size_t N); \ template void MaskZeroSequences<T>(const int32_t hidden_size, \ T* y_output_data, \ T* y_h_output_data, \ T* y_c_output_data, \ const int32_t* zeor_seq_index_cache, \ const size_t N); SPECIALIZED_RNN_IMPL(half) SPECIALIZED_RNN_IMPL(float) SPECIALIZED_RNN_IMPL(double) } // namespace cuda } // namespace onnxruntime
120a8d6d0a5189b9146f0c61ff9621d8b8e8b84d.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cu_inc/common.cuh" #include "rnn_impl.h" namespace onnxruntime { namespace cuda { template <typename T> __global__ void _ReverseBySequenceKernel(const int32_t seq_length, const int32_t block_size, const fast_divmod div_batch_block, const T* data, T* reversed_data, const CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); int seq_id, offset; div_batch_block.divmod(id, seq_id, offset); int org_id = (seq_length - seq_id - 1) * block_size + offset; reversed_data[id] = data[org_id]; } template <typename T> void ReverseBySequence(const int32_t seq_length, const int32_t batch_size, const int32_t input_or_hidden_size, const T* data, T* reversed_data, const size_t N) { // kerneral int32_t block_size = batch_size * input_or_hidden_size; fast_divmod div_batch_block(block_size); int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock)); _ReverseBySequenceKernel<T><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>( seq_length, block_size, div_batch_block, data, reversed_data, (CUDA_LONG)N); } template <typename T> __global__ void _BidirectionalDataKernel(const int32_t seq_length, const int32_t batch_size, const int32_t hidden_size, const int32_t seq_block_size, const fast_divmod div_seq_block, const fast_divmod div_output_block, const T* data, T* reordered_data, const CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); int seq_id, seq_offset, output_id, offset; div_seq_block.divmod(id, seq_id, seq_offset); div_output_block.divmod(seq_offset, output_id, offset); int org_output_id = 0; if (output_id < batch_size) { org_output_id = 2 * output_id; } else { org_output_id = (output_id - batch_size) * 2 + 1; } int org_id = seq_id * seq_block_size + org_output_id * hidden_size + offset; reordered_data[id] = data[org_id]; } template <typename T> void ReorderBidirectionalDataInSequence(const int32_t seq_length, const int32_t batch_size, const int32_t hidden_size, const T* data, T* reordered_data, const size_t N) { // The cudnn Y output is organize like [Y1, YB1] [Y2, YB2] ... // need to reorganize it to [Y1, Y2, ...] [YB1, YB2, ...] 
int32_t seq_block_size = 2 * batch_size * hidden_size; fast_divmod div_seq_block(seq_block_size); fast_divmod div_output_block(hidden_size); int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock)); _BidirectionalDataKernel<T><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>( seq_length, batch_size, hidden_size, seq_block_size, div_seq_block, div_output_block, data, reordered_data, (CUDA_LONG)N); } template <typename T> __global__ void _RnnMaskKernel(const int32_t seq_length, const int32_t batch_size, const int32_t hidden_size, const int32_t* sequence_lens, const fast_divmod div_seq_block, const fast_divmod div_dir_block, const fast_divmod div_batch_block, T* y_output_data, T* y_h_output_data, const CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); int seq_id, direction_id, batch_id, offset; div_seq_block.divmod(id, seq_id, offset); div_dir_block.divmod(offset, direction_id, offset); div_batch_block.divmod(offset, batch_id, offset); int32_t batch_seq_length = sequence_lens[batch_id]; if (batch_id >= batch_size || batch_seq_length == seq_length) { return; } if (seq_id >= batch_seq_length) { y_output_data[id] = 0; return; } if ((y_h_output_data != nullptr) && ((direction_id == 0 && (seq_id + 1) == batch_seq_length) || (direction_id == 1 && seq_id == 0))) { int hy_idx = direction_id * batch_size * hidden_size + batch_id * hidden_size + offset; y_h_output_data[hy_idx] = y_output_data[id]; } } template <typename T> void RnnMaskImpl(const int32_t num_directions, const int32_t seq_length, const int32_t batch_size, const int32_t hidden_size, const int32_t* sequence_lens, T* y_output_data, T* y_h_output_data, const size_t N) { fast_divmod div_seq_block(batch_size * hidden_size * num_directions); fast_divmod div_dir_block(batch_size * hidden_size); fast_divmod div_batch_block(hidden_size); int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock)); _RnnMaskKernel<T><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>( seq_length, batch_size, hidden_size, sequence_lens, div_seq_block, div_dir_block, div_batch_block, y_output_data, y_h_output_data, (CUDA_LONG)N); } template <typename T> __global__ void _MaskZeroSequences(const int32_t hidden_size, T* y_output_data, T* y_h_output_data, T* y_c_output_data, const int32_t* zeor_seq_index_cache, const CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); int32_t zero_seq_offset = zeor_seq_index_cache[id] * hidden_size; if (y_output_data != nullptr) { for (int i = 0; i < hidden_size; ++i) { y_output_data[zero_seq_offset + i] = 0; } } if (y_h_output_data != nullptr) { for (int i = 0; i < hidden_size; ++i) { y_h_output_data[zero_seq_offset + i] = 0; } } if (y_c_output_data != nullptr) { for (int i = 0; i < hidden_size; ++i) { y_c_output_data[zero_seq_offset + i] = 0; } } } template <typename T> void MaskZeroSequences(const int32_t hidden_size, T* y_output_data, T* y_h_output_data, T* y_c_output_data, const int32_t* zeor_seq_index_cache, const size_t N) { int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock)); _MaskZeroSequences<T><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>( hidden_size, y_output_data, y_h_output_data, y_c_output_data, zeor_seq_index_cache, (CUDA_LONG)N); } #define SPECIALIZED_RNN_IMPL(T) \ template void RnnMaskImpl<T>(const int32_t num_directions, \ const int32_t seq_length, \ const int32_t batch_size, \ const int32_t hidden_size, \ const int32_t* sequence_lens, \ T* y_output_data, \ T* y_h_output_data, \ const size_t N); \ template void 
ReverseBySequence<T>(const int32_t seq_length, \ const int32_t batch_size, \ const int32_t hidden_size, \ const T* data, \ T* reversed_data, \ const size_t N); \ template void ReorderBidirectionalDataInSequence<T>(const int32_t seq_length, \ const int32_t batch_size, \ const int32_t hidden_size,\ const T* data, \ T* reordered_data, \ const size_t N); \ template void MaskZeroSequences<T>(const int32_t hidden_size, \ T* y_output_data, \ T* y_h_output_data, \ T* y_c_output_data, \ const int32_t* zeor_seq_index_cache, \ const size_t N); SPECIALIZED_RNN_IMPL(half) SPECIALIZED_RNN_IMPL(float) SPECIALIZED_RNN_IMPL(double) } // namespace cuda } // namespace onnxruntime
6972912d9a00f2fda0f12788573bda0ea85d2871.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/tensor/gather_nd_impl.h" #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/atomic/common.cuh" namespace onnxruntime { namespace cuda { template <typename T> __global__ void _GatherNDGradKernel( const size_t num_slices, const T* update_data, T* output_data, const size_t slice_size, const int64_t* slice_offsets) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(i, num_slices * slice_size); uint64_t slice_offset = slice_offsets[i / slice_size]; size_t j = i % slice_size; atomic_add(output_data + slice_offset + j, update_data[i]); }; template <typename T> void GatherNDGradImpl( const size_t num_slices, const void* update_data, void* output_data, const size_t slice_size, const int64_t* input_slice_offsets_data) { const auto blocks_per_grid = CeilDiv(num_slices * slice_size, GridDim::maxThreadsPerBlock); hipLaunchKernelGGL(( _GatherNDGradKernel<T>), dim3(blocks_per_grid), dim3(GridDim::maxThreadsPerBlock), 0, 0, num_slices, static_cast<const T*>(update_data), static_cast<T*>(output_data), slice_size, input_slice_offsets_data); } #define SPECIALIZED_GRAD_IMPL(T) \ template void GatherNDGradImpl<T>(const size_t num_slices, const void* update_data, void* output_data, const size_t slice_size, const int64_t* input_slice_offsets_data) SPECIALIZED_GRAD_IMPL(float); #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 SPECIALIZED_GRAD_IMPL(half); SPECIALIZED_GRAD_IMPL(double); #endif } // namespace cuda } // namespace onnxruntime
6972912d9a00f2fda0f12788573bda0ea85d2871.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/tensor/gather_nd_impl.h" #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/atomic/common.cuh" namespace onnxruntime { namespace cuda { template <typename T> __global__ void _GatherNDGradKernel( const size_t num_slices, const T* update_data, T* output_data, const size_t slice_size, const int64_t* slice_offsets) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(i, num_slices * slice_size); uint64_t slice_offset = slice_offsets[i / slice_size]; size_t j = i % slice_size; atomic_add(output_data + slice_offset + j, update_data[i]); }; template <typename T> void GatherNDGradImpl( const size_t num_slices, const void* update_data, void* output_data, const size_t slice_size, const int64_t* input_slice_offsets_data) { const auto blocks_per_grid = CeilDiv(num_slices * slice_size, GridDim::maxThreadsPerBlock); _GatherNDGradKernel<T><<<blocks_per_grid, GridDim::maxThreadsPerBlock, 0>>>( num_slices, static_cast<const T*>(update_data), static_cast<T*>(output_data), slice_size, input_slice_offsets_data); } #define SPECIALIZED_GRAD_IMPL(T) \ template void GatherNDGradImpl<T>(const size_t num_slices, const void* update_data, void* output_data, const size_t slice_size, const int64_t* input_slice_offsets_data) SPECIALIZED_GRAD_IMPL(float); #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 SPECIALIZED_GRAD_IMPL(half); SPECIALIZED_GRAD_IMPL(double); #endif } // namespace cuda } // namespace onnxruntime
6462fc8d268012939f165bd19be42cc50f4739b0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math.h> #include <stdio.h> #include <stdlib.h> #include "cuda_utils.h" // input: unknown(b, n, 3) known(b, m, 3) // output: dist2(b, n, 3), idx(b, n, 3) template <typename scalar_t> __global__ void three_nn_kernel(int b, int n, int m, const scalar_t *__restrict__ unknown, const scalar_t *__restrict__ known, scalar_t *__restrict__ dist2, int *__restrict__ idx) { int batch_index = blockIdx.x; unknown += batch_index * n * 3; known += batch_index * m * 3; dist2 += batch_index * n * 3; idx += batch_index * n * 3; int index = threadIdx.x; int stride = blockDim.x; for (int j = index; j < n; j += stride) { scalar_t ux = unknown[j * 3 + 0]; scalar_t uy = unknown[j * 3 + 1]; scalar_t uz = unknown[j * 3 + 2]; scalar_t best1 = 6e4, best2 = 6e4, best3 = 6e4; int besti1 = 0, besti2 = 0, besti3 = 0; for (int k = 0; k < m; ++k) { scalar_t x = known[k * 3 + 0]; scalar_t y = known[k * 3 + 1]; scalar_t z = known[k * 3 + 2]; scalar_t d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z); if (d < best1) { best3 = best2; besti3 = besti2; best2 = best1; besti2 = besti1; best1 = d; besti1 = k; } else if (d < best2) { best3 = best2; besti3 = besti2; best2 = d; besti2 = k; } else if (d < best3) { best3 = d; besti3 = k; } } dist2[j * 3 + 0] = best1; dist2[j * 3 + 1] = best2; dist2[j * 3 + 2] = best3; idx[j * 3 + 0] = besti1; idx[j * 3 + 1] = besti2; idx[j * 3 + 2] = besti3; } } void three_nn_kernel_wrapper(int b, int n, int m, const at::Tensor &unknown, const at::Tensor &known, at::Tensor &dist2, int *idx) { hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); // three_nn_kernel<<<b, opt_n_threads(n), 0, stream>>>(b, n, m, unknown, known, // dist2, idx); AT_DISPATCH_FLOATING_TYPES_AND_HALF(unknown.type(), "three_nn", ([&] { hipLaunchKernelGGL(( three_nn_kernel<scalar_t>), dim3(b), dim3(opt_n_threads(n)), 0, stream, b, n, m, unknown.data_ptr<scalar_t>(), known.data_ptr<scalar_t>(), dist2.data_ptr<scalar_t>(), idx); })); CUDA_CHECK_ERRORS(); } // input: points(b, c, m), idx(b, n, 3), weight(b, n, 3) // output: out(b, c, n) template <typename scalar_t> __global__ void three_interpolate_kernel(int b, int c, int m, int n, const scalar_t *__restrict__ points, const int *__restrict__ idx, const scalar_t *__restrict__ weight, scalar_t *__restrict__ out) { int batch_index = blockIdx.x; points += batch_index * m * c; idx += batch_index * n * 3; weight += batch_index * n * 3; out += batch_index * n * c; const int index = threadIdx.y * blockDim.x + threadIdx.x; const int stride = blockDim.y * blockDim.x; for (int i = index; i < c * n; i += stride) { const int l = i / n; const int j = i % n; scalar_t w1 = weight[j * 3 + 0]; scalar_t w2 = weight[j * 3 + 1]; scalar_t w3 = weight[j * 3 + 2]; int i1 = idx[j * 3 + 0]; int i2 = idx[j * 3 + 1]; int i3 = idx[j * 3 + 2]; out[i] = points[l * m + i1] * w1 + points[l * m + i2] * w2 + points[l * m + i3] * w3; } } void three_interpolate_kernel_wrapper(int b, int c, int m, int n, const at::Tensor &points, const int *idx, const at::Tensor &weight, at::Tensor &out) { hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); // three_interpolate_kernel<<<b, opt_block_config(n, c), 0, stream>>>( // b, c, m, n, points, idx, weight, out); AT_DISPATCH_FLOATING_TYPES_AND_HALF(points.type(), "three_interpolate", ([&] { hipLaunchKernelGGL(( three_interpolate_kernel<scalar_t>), dim3(b), dim3(opt_block_config(n, c)), 0, stream, b, c, m, n, 
points.data_ptr<scalar_t>(), idx, weight.data_ptr<scalar_t>(), out.data_ptr<scalar_t>()); })); CUDA_CHECK_ERRORS(); } // input: grad_out(b, c, n), idx(b, n, 3), weight(b, n, 3) // output: grad_points(b, c, m) template <typename scalar_t> __global__ void three_interpolate_grad_kernel( int b, int c, int n, int m, const scalar_t *__restrict__ grad_out, const int *__restrict__ idx, const scalar_t *__restrict__ weight, scalar_t *__restrict__ grad_points) { int batch_index = blockIdx.x; grad_out += batch_index * n * c; idx += batch_index * n * 3; weight += batch_index * n * 3; grad_points += batch_index * m * c; const int index = threadIdx.y * blockDim.x + threadIdx.x; const int stride = blockDim.y * blockDim.x; for (int i = index; i < c * n; i += stride) { const int l = i / n; const int j = i % n; scalar_t w1 = weight[j * 3 + 0]; scalar_t w2 = weight[j * 3 + 1]; scalar_t w3 = weight[j * 3 + 2]; int i1 = idx[j * 3 + 0]; int i2 = idx[j * 3 + 1]; int i3 = idx[j * 3 + 2]; gpuAtomicAdd(grad_points + l * m + i1, grad_out[i] * w1); gpuAtomicAdd(grad_points + l * m + i2, grad_out[i] * w2); gpuAtomicAdd(grad_points + l * m + i3, grad_out[i] * w3); } } void three_interpolate_grad_kernel_wrapper(int b, int c, int n, int m, const at::Tensor &grad_out, const int *idx, const at::Tensor &weight, at::Tensor &grad_points) { hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); // three_interpolate_grad_kernel<<<b, opt_block_config(n, c), 0, stream>>>( // b, c, n, m, grad_out, idx, weight, grad_points); AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_out.type(), "three_interpolate_grad", ([&] { hipLaunchKernelGGL(( three_interpolate_grad_kernel<scalar_t>), dim3(b), dim3(opt_block_config(n, c)), 0, stream, b, c, n, m, grad_out.data_ptr<scalar_t>(), idx, weight.data_ptr<scalar_t>(), grad_points.data_ptr<scalar_t>()); })); CUDA_CHECK_ERRORS(); }
6462fc8d268012939f165bd19be42cc50f4739b0.cu
#include <math.h> #include <stdio.h> #include <stdlib.h> #include "cuda_utils.h" // input: unknown(b, n, 3) known(b, m, 3) // output: dist2(b, n, 3), idx(b, n, 3) template <typename scalar_t> __global__ void three_nn_kernel(int b, int n, int m, const scalar_t *__restrict__ unknown, const scalar_t *__restrict__ known, scalar_t *__restrict__ dist2, int *__restrict__ idx) { int batch_index = blockIdx.x; unknown += batch_index * n * 3; known += batch_index * m * 3; dist2 += batch_index * n * 3; idx += batch_index * n * 3; int index = threadIdx.x; int stride = blockDim.x; for (int j = index; j < n; j += stride) { scalar_t ux = unknown[j * 3 + 0]; scalar_t uy = unknown[j * 3 + 1]; scalar_t uz = unknown[j * 3 + 2]; scalar_t best1 = 6e4, best2 = 6e4, best3 = 6e4; int besti1 = 0, besti2 = 0, besti3 = 0; for (int k = 0; k < m; ++k) { scalar_t x = known[k * 3 + 0]; scalar_t y = known[k * 3 + 1]; scalar_t z = known[k * 3 + 2]; scalar_t d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z); if (d < best1) { best3 = best2; besti3 = besti2; best2 = best1; besti2 = besti1; best1 = d; besti1 = k; } else if (d < best2) { best3 = best2; besti3 = besti2; best2 = d; besti2 = k; } else if (d < best3) { best3 = d; besti3 = k; } } dist2[j * 3 + 0] = best1; dist2[j * 3 + 1] = best2; dist2[j * 3 + 2] = best3; idx[j * 3 + 0] = besti1; idx[j * 3 + 1] = besti2; idx[j * 3 + 2] = besti3; } } void three_nn_kernel_wrapper(int b, int n, int m, const at::Tensor &unknown, const at::Tensor &known, at::Tensor &dist2, int *idx) { cudaStream_t stream = at::cuda::getCurrentCUDAStream(); // three_nn_kernel<<<b, opt_n_threads(n), 0, stream>>>(b, n, m, unknown, known, // dist2, idx); AT_DISPATCH_FLOATING_TYPES_AND_HALF(unknown.type(), "three_nn", ([&] { three_nn_kernel<scalar_t><<<b, opt_n_threads(n), 0, stream>>>( b, n, m, unknown.data_ptr<scalar_t>(), known.data_ptr<scalar_t>(), dist2.data_ptr<scalar_t>(), idx); })); CUDA_CHECK_ERRORS(); } // input: points(b, c, m), idx(b, n, 3), weight(b, n, 3) // output: out(b, c, n) template <typename scalar_t> __global__ void three_interpolate_kernel(int b, int c, int m, int n, const scalar_t *__restrict__ points, const int *__restrict__ idx, const scalar_t *__restrict__ weight, scalar_t *__restrict__ out) { int batch_index = blockIdx.x; points += batch_index * m * c; idx += batch_index * n * 3; weight += batch_index * n * 3; out += batch_index * n * c; const int index = threadIdx.y * blockDim.x + threadIdx.x; const int stride = blockDim.y * blockDim.x; for (int i = index; i < c * n; i += stride) { const int l = i / n; const int j = i % n; scalar_t w1 = weight[j * 3 + 0]; scalar_t w2 = weight[j * 3 + 1]; scalar_t w3 = weight[j * 3 + 2]; int i1 = idx[j * 3 + 0]; int i2 = idx[j * 3 + 1]; int i3 = idx[j * 3 + 2]; out[i] = points[l * m + i1] * w1 + points[l * m + i2] * w2 + points[l * m + i3] * w3; } } void three_interpolate_kernel_wrapper(int b, int c, int m, int n, const at::Tensor &points, const int *idx, const at::Tensor &weight, at::Tensor &out) { cudaStream_t stream = at::cuda::getCurrentCUDAStream(); // three_interpolate_kernel<<<b, opt_block_config(n, c), 0, stream>>>( // b, c, m, n, points, idx, weight, out); AT_DISPATCH_FLOATING_TYPES_AND_HALF(points.type(), "three_interpolate", ([&] { three_interpolate_kernel<scalar_t><<<b, opt_block_config(n, c), 0, stream>>>( b, c, m, n, points.data_ptr<scalar_t>(), idx, weight.data_ptr<scalar_t>(), out.data_ptr<scalar_t>()); })); CUDA_CHECK_ERRORS(); } // input: grad_out(b, c, n), idx(b, n, 3), weight(b, n, 3) // output: grad_points(b, 
c, m) template <typename scalar_t> __global__ void three_interpolate_grad_kernel( int b, int c, int n, int m, const scalar_t *__restrict__ grad_out, const int *__restrict__ idx, const scalar_t *__restrict__ weight, scalar_t *__restrict__ grad_points) { int batch_index = blockIdx.x; grad_out += batch_index * n * c; idx += batch_index * n * 3; weight += batch_index * n * 3; grad_points += batch_index * m * c; const int index = threadIdx.y * blockDim.x + threadIdx.x; const int stride = blockDim.y * blockDim.x; for (int i = index; i < c * n; i += stride) { const int l = i / n; const int j = i % n; scalar_t w1 = weight[j * 3 + 0]; scalar_t w2 = weight[j * 3 + 1]; scalar_t w3 = weight[j * 3 + 2]; int i1 = idx[j * 3 + 0]; int i2 = idx[j * 3 + 1]; int i3 = idx[j * 3 + 2]; gpuAtomicAdd(grad_points + l * m + i1, grad_out[i] * w1); gpuAtomicAdd(grad_points + l * m + i2, grad_out[i] * w2); gpuAtomicAdd(grad_points + l * m + i3, grad_out[i] * w3); } } void three_interpolate_grad_kernel_wrapper(int b, int c, int n, int m, const at::Tensor &grad_out, const int *idx, const at::Tensor &weight, at::Tensor &grad_points) { cudaStream_t stream = at::cuda::getCurrentCUDAStream(); // three_interpolate_grad_kernel<<<b, opt_block_config(n, c), 0, stream>>>( // b, c, n, m, grad_out, idx, weight, grad_points); AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_out.type(), "three_interpolate_grad", ([&] { three_interpolate_grad_kernel<scalar_t><<<b, opt_block_config(n, c), 0, stream>>>( b, c, n, m, grad_out.data_ptr<scalar_t>(), idx, weight.data_ptr<scalar_t>(), grad_points.data_ptr<scalar_t>()); })); CUDA_CHECK_ERRORS(); }
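The record above pairs the HIP and CUDA builds of a 3-nearest-neighbour search (three_nn_kernel) with the weighted three-point interpolation and its gradient that consume the resulting indices. As a minimal illustration of the per-query-point logic inside three_nn_kernel, here is a hypothetical host-side reference in plain C++ (a sketch only; three_nn_ref and its signature are illustrative and not part of either file):

#include <cfloat>

// Hypothetical CPU reference for one query point of three_nn_kernel:
// scan all m known points, keeping the three smallest squared distances
// and their indices, exactly as the device loop does per thread.
void three_nn_ref(const float unknown[3], const float* known, int m,
                  float dist2[3], int idx[3]) {
    float best1 = FLT_MAX, best2 = FLT_MAX, best3 = FLT_MAX;
    int besti1 = 0, besti2 = 0, besti3 = 0;
    const float ux = unknown[0], uy = unknown[1], uz = unknown[2];
    for (int k = 0; k < m; ++k) {
        const float dx = ux - known[k * 3 + 0];
        const float dy = uy - known[k * 3 + 1];
        const float dz = uz - known[k * 3 + 2];
        const float d = dx * dx + dy * dy + dz * dz;
        if (d < best1) {
            best3 = best2; besti3 = besti2;
            best2 = best1; besti2 = besti1;
            best1 = d;     besti1 = k;
        } else if (d < best2) {
            best3 = best2; besti3 = besti2;
            best2 = d;     besti2 = k;
        } else if (d < best3) {
            best3 = d;     besti3 = k;
        }
    }
    dist2[0] = best1; dist2[1] = best2; dist2[2] = best3;
    idx[0] = besti1;  idx[1] = besti2;  idx[2] = besti3;
}

Note that the device code initialises the three best distances to 6e4 rather than FLT_MAX, so known points whose squared distance exceeds 6e4 (roughly 245 units away) can never be selected; the reference above uses FLT_MAX instead.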
50eae91181414ad98d7a97d828534d9bbe15a224.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/Exceptions.h> // Another possibility: // #include <torch/all.h> #include <assert.h> #include "type_shim.h" #include "multi_tensor_apply.cuh" #define BLOCK_SIZE 512 #define ILP 4 // Step 1 computes the 'update' value of regular Adam optimizer. template<typename GRAD_T, typename T, typename UPD_T> struct LAMBStage1Functor { __device__ __forceinline__ void operator()( int chunk_size, volatile int* noop_gmem, TensorListMetadata<5>& tl, const float* per_tensor_decay, const float beta1, const float beta2, const float beta1_correction, const float beta2_correction, const float epsilon, const float clipped_global_grad_norm) { // I'd like this kernel to propagate infs/nans. // if(*noop_gmem == 1) // return; int tensor_loc = tl.block_to_tensor[blockIdx.x]; int tensor_num = tl.start_tensor_this_launch + tensor_loc; int chunk_idx = tl.block_to_chunk[blockIdx.x]; int n = tl.sizes[tensor_loc]; float decay = per_tensor_decay[tensor_num]; GRAD_T* g = (GRAD_T*)tl.addresses[0][tensor_loc]; g += chunk_idx*chunk_size; T* p = (T*)tl.addresses[1][tensor_loc]; p += chunk_idx*chunk_size; T* m = (T*)tl.addresses[2][tensor_loc]; m += chunk_idx*chunk_size; T* v = (T*)tl.addresses[3][tensor_loc]; v += chunk_idx*chunk_size; UPD_T* update = (UPD_T*)tl.addresses[4][tensor_loc]; update += chunk_idx*chunk_size; n -= chunk_idx*chunk_size; // see note in multi_tensor_scale_kernel.cu for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x*ILP) { GRAD_T r_g[ILP]; T r_p[ILP]; T r_m[ILP]; T r_v[ILP]; #pragma unroll for(int ii = 0; ii < ILP; ii++) { int i = i_start + threadIdx.x + ii*blockDim.x; if(i < n && i < chunk_size) { r_g[ii] = g[i]; r_p[ii] = p[i]; r_m[ii] = m[i]; r_v[ii] = v[i]; } else { r_g[ii] = GRAD_T(0); r_p[ii] = T(0); r_m[ii] = T(0); r_v[ii] = T(0); } } #pragma unroll for(int ii = 0; ii < ILP; ii++) { T scaled_grad = r_g[ii] / clipped_global_grad_norm; r_m[ii] = r_m[ii] * beta1 + (1-beta1) * scaled_grad; r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad; T next_m_unbiased = r_m[ii] / beta1_correction; T next_v_unbiased = r_v[ii] / beta2_correction; T denom = std::sqrt(next_v_unbiased) + epsilon; r_p[ii] = (next_m_unbiased/denom) + (decay*r_p[ii]); } #pragma unroll for(int ii = 0; ii < ILP; ii++) { int i = i_start + threadIdx.x + ii*blockDim.x; if(i < n && i < chunk_size) { update[i] = (UPD_T)r_p[ii]; m[i] = r_m[ii]; v[i] = r_v[ii]; } } } } }; void multi_tensor_lamb_stage1_cuda( int chunk_size, at::Tensor noop_flag, std::vector<std::vector<at::Tensor>> tensor_lists, at::Tensor per_tensor_decay, const int step, const float beta1, const float beta2, const float epsilon, const float global_grad_norm, const float max_global_grad_norm) { using namespace at; float clipped_global_grad_norm = global_grad_norm > max_global_grad_norm ? 
      global_grad_norm / max_global_grad_norm : 1.0f;
  float next_step = float(step+1);
  float beta1_correction = 1.0f - ::pow(beta1, next_step);
  float beta2_correction = 1.0f - ::pow(beta2, next_step);

  DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 0, "lamb_stage_1",
    DISPATCH_FLOAT_AND_HALF(tensor_lists[1][0].scalar_type(), 1, "lamb_stage_1",
      DISPATCH_FLOAT_AND_HALF(tensor_lists[4][0].scalar_type(), 2, "lamb_stage_1",
        multi_tensor_apply<5>(
          BLOCK_SIZE,
          chunk_size,
          noop_flag,
          tensor_lists,
          LAMBStage1Functor<scalar_t_0, scalar_t_1, scalar_t_2>(),
          per_tensor_decay.DATA_PTR<float>(),
          beta1,
          beta2,
          beta1_correction,
          beta2_correction,
          epsilon,
          clipped_global_grad_norm); )))

  AT_CUDA_CHECK(hipGetLastError());

  // AT_CUDA_CHECK(hipDeviceSynchronize());
}
50eae91181414ad98d7a97d828534d9bbe15a224.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/Exceptions.h> // Another possibility: // #include <torch/all.h> #include <assert.h> #include "type_shim.h" #include "multi_tensor_apply.cuh" #define BLOCK_SIZE 512 #define ILP 4 // Step 1 computes the 'update' value of regular Adam optimizer. template<typename GRAD_T, typename T, typename UPD_T> struct LAMBStage1Functor { __device__ __forceinline__ void operator()( int chunk_size, volatile int* noop_gmem, TensorListMetadata<5>& tl, const float* per_tensor_decay, const float beta1, const float beta2, const float beta1_correction, const float beta2_correction, const float epsilon, const float clipped_global_grad_norm) { // I'd like this kernel to propagate infs/nans. // if(*noop_gmem == 1) // return; int tensor_loc = tl.block_to_tensor[blockIdx.x]; int tensor_num = tl.start_tensor_this_launch + tensor_loc; int chunk_idx = tl.block_to_chunk[blockIdx.x]; int n = tl.sizes[tensor_loc]; float decay = per_tensor_decay[tensor_num]; GRAD_T* g = (GRAD_T*)tl.addresses[0][tensor_loc]; g += chunk_idx*chunk_size; T* p = (T*)tl.addresses[1][tensor_loc]; p += chunk_idx*chunk_size; T* m = (T*)tl.addresses[2][tensor_loc]; m += chunk_idx*chunk_size; T* v = (T*)tl.addresses[3][tensor_loc]; v += chunk_idx*chunk_size; UPD_T* update = (UPD_T*)tl.addresses[4][tensor_loc]; update += chunk_idx*chunk_size; n -= chunk_idx*chunk_size; // see note in multi_tensor_scale_kernel.cu for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x*ILP) { GRAD_T r_g[ILP]; T r_p[ILP]; T r_m[ILP]; T r_v[ILP]; #pragma unroll for(int ii = 0; ii < ILP; ii++) { int i = i_start + threadIdx.x + ii*blockDim.x; if(i < n && i < chunk_size) { r_g[ii] = g[i]; r_p[ii] = p[i]; r_m[ii] = m[i]; r_v[ii] = v[i]; } else { r_g[ii] = GRAD_T(0); r_p[ii] = T(0); r_m[ii] = T(0); r_v[ii] = T(0); } } #pragma unroll for(int ii = 0; ii < ILP; ii++) { T scaled_grad = r_g[ii] / clipped_global_grad_norm; r_m[ii] = r_m[ii] * beta1 + (1-beta1) * scaled_grad; r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad; T next_m_unbiased = r_m[ii] / beta1_correction; T next_v_unbiased = r_v[ii] / beta2_correction; T denom = std::sqrt(next_v_unbiased) + epsilon; r_p[ii] = (next_m_unbiased/denom) + (decay*r_p[ii]); } #pragma unroll for(int ii = 0; ii < ILP; ii++) { int i = i_start + threadIdx.x + ii*blockDim.x; if(i < n && i < chunk_size) { update[i] = (UPD_T)r_p[ii]; m[i] = r_m[ii]; v[i] = r_v[ii]; } } } } }; void multi_tensor_lamb_stage1_cuda( int chunk_size, at::Tensor noop_flag, std::vector<std::vector<at::Tensor>> tensor_lists, at::Tensor per_tensor_decay, const int step, const float beta1, const float beta2, const float epsilon, const float global_grad_norm, const float max_global_grad_norm) { using namespace at; float clipped_global_grad_norm = global_grad_norm > max_global_grad_norm ? 
      global_grad_norm / max_global_grad_norm : 1.0f;
  float next_step = float(step+1);
  float beta1_correction = 1.0f - std::pow(beta1, next_step);
  float beta2_correction = 1.0f - std::pow(beta2, next_step);

  DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 0, "lamb_stage_1",
    DISPATCH_FLOAT_AND_HALF(tensor_lists[1][0].scalar_type(), 1, "lamb_stage_1",
      DISPATCH_FLOAT_AND_HALF(tensor_lists[4][0].scalar_type(), 2, "lamb_stage_1",
        multi_tensor_apply<5>(
          BLOCK_SIZE,
          chunk_size,
          noop_flag,
          tensor_lists,
          LAMBStage1Functor<scalar_t_0, scalar_t_1, scalar_t_2>(),
          per_tensor_decay.DATA_PTR<float>(),
          beta1,
          beta2,
          beta1_correction,
          beta2_correction,
          epsilon,
          clipped_global_grad_norm); )))

  AT_CUDA_CHECK(cudaGetLastError());

  // AT_CUDA_CHECK(cudaDeviceSynchronize());
}
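Apart from the hipify banner, the two versions of this LAMB stage-1 file differ only in the ATen runtime headers, the pow qualification (::pow vs std::pow), and the final error check; the per-element arithmetic in LAMBStage1Functor is identical. A hypothetical scalar restatement of that arithmetic (illustrative only; lamb_stage1_element is not part of either file):

#include <cmath>

// Mirrors the inner loop of LAMBStage1Functor for a single element:
// clip the gradient by the global-norm ratio, update the Adam moments,
// bias-correct them, and emit the decayed update. The parameter p is only
// read here; stage 1 writes the 'update' buffer and the moments, and the
// actual parameter step presumably happens in a later stage.
void lamb_stage1_element(float g, float p, float& m, float& v, float& update,
                         float beta1, float beta2,
                         float beta1_correction, float beta2_correction,
                         float epsilon, float decay,
                         float clipped_global_grad_norm) {
    const float scaled_grad = g / clipped_global_grad_norm;
    m = m * beta1 + (1.0f - beta1) * scaled_grad;
    v = v * beta2 + (1.0f - beta2) * scaled_grad * scaled_grad;
    const float m_hat = m / beta1_correction;
    const float v_hat = v / beta2_correction;
    update = m_hat / (std::sqrt(v_hat) + epsilon) + decay * p;
}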
d7ca3cc863e5f14e348019e82ff8bda7da015dd1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../include/cudaconv2.cuh" #include "../../nvmatrix/include/nvmatrix_kernels.cuh" #define LO16(x) ((x) & 0x0000FFFF) #define HI16(x) ((x) >> 16) #define WA_LOOP(r) _Pragma("unroll") \ for (int c = 0; c < colorsPerThread; c++) { \ _Pragma("unroll") \ for (int f = 0; f < filtersPerThread; f++) { \ prod[f][c] += shImages[threadIdx.y + c * B_Y][(r)] * shHidActs[threadIdx.x + f * B_X][(r)]; \ } \ } #define WA_LOOP2(r) _Pragma("unroll") \ for (int f = 0; f < filtersPerThread; f++) { \ _Pragma("unroll") \ for (int c = 0; c < colorsPerThread; c++) { \ prod[f][c] += shImages[threadIdx.y + c * B_Y][(r)] * shHidActs[threadIdx.x + f * B_X][(r)]; \ } \ } #define WA_IMLOAD(r) imPreload[r] = im[(r) * B_X * B_Y / preloadCases * imgPixels * imgStride]; #define WA_IMLOAD_2(r) imPreload[r] = images[imgOffset2 + (r) * B_X * B_Y / preloadCases * imgPixels * imgStride]; #define WA_IMLOAD_TX(r) imPreload[r] = tex1Dfetch<float>(images, imgOffset2 + (r) * B_X * B_Y / preloadCases * imgPixels * imgStride); #define WA_HALOAD(r) haPreload[r] = ha[(r) * B_X * B_Y / preloadCases * numImages * numModules]; #define WA_HALOAD_2(r) haPreload[r] = hidActs[hidActsOffset2 + (r) * B_X * B_Y / preloadCases * numImages * numModules]; #define WA_HALOAD_TX(r) haPreload[r] = tex1Dfetch<float>(hidActs, hidActsOffset2 + (r) * B_X * B_Y / preloadCases * numImages * numModules); __device__ __forceinline__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( const int my, const int mx, const int paddingStart, const int numModulesX, const int moduleStride, const int blockPixelY, const int blockPixelX, const int imgSizeX, const int imgStride, int& pixIdx, int& m) { const int imgLoadModPosY = paddingStart + my * moduleStride; const int imgLoadModPosX = paddingStart + mx * moduleStride; const int pxY = imgLoadModPosY + blockPixelY; // pixel x,y coords in image const int pxX = imgLoadModPosX + blockPixelX; pixIdx = (pxY * imgSizeX + pxX) * imgStride; // pixel idx in image m = my * numModulesX + mx; } /* * Each block computes weight gradients for B_Y * pixelsPerThread pixels and B_X filters * threadIdx.x determines filter * threadIdx.y determines pixel in filter * * blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum * blockIdx.y determines pixel batch of B_Y * pixelsPerThread * * Number of filters must be divisible by B_X * filtersPerThread * Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false. * * images: (numColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numColors, filterPixels, numFilters) * * B_Y * B_X should be divisible by preloadCases. * preloadCases one of 16, 32. 
* B_X one of 4, 8, 16, 32 * B_Y arbitrary (satisfying divisibility constraints) * numModules must be divisible by partialSum * pixelsPerThread must be divisible by pixelCache * * After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)... * so the compiler is messing up here somehow. It's unable to optimize that case away. */ template <int B_Y, int B_X, int pixelCache, int pixelsPerThread, int filtersPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds> __global__ void conv_weight_acts_c_kepler(float* images, float* hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int partialSum, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[pixelCache * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels __shared__ float shHidActs[B_X * filtersPerThread][preloadCases + 1]; // preload preloadCases cases of B_X hidActs const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int filterBlocksPerModule = numFilters / (B_X*filtersPerThread); const int outputModuleIdx = blockIdx.x / filterBlocksPerModule; const int moduleIdx = partialSum * outputModuleIdx; const int blockFilterIdx = B_X * filtersPerThread* (blockIdx.x % filterBlocksPerModule); // const int moduleStride = (imgSize - filterSize + 1) / numModulesX; const int numModules = numModulesY * numModulesX; const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread; images += loadX; hidActs += blockFilterIdx * numImages * numModules + loadY * numImages * numModules + loadX; targets += (outputModuleIdx * numFilters) * filterPixels * numColors + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.y * numFilters + threadIdx.x; float prod[numColors][pixelsPerThread][filtersPerThread]; #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[c][p][f] = 0; } } } __shared__ int pxIdxes[B_Y*pixelsPerThread]; //__shared__ bool isPxInImage[B_Y*pixelsPerThread]; for (int m = moduleIdx; m < moduleIdx + partialSum; m++) { __syncthreads(); if (tidx < B_Y * pixelsPerThread) { const int imgLoadModPosY = paddingStart + (m / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride; int pxY = (imgLoadModPosY + (blockPixelOffset + tidx) / filterSize); int pxX = (imgLoadModPosX + (blockPixelOffset + tidx) % filterSize); int pixIdx = (pxY * imgSizeX + pxX) * imgStride; pxIdxes[tidx] = pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX ? 
pixIdx : -1; //isPxInImage[tidx] = ; } __syncthreads(); for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { if (/*loadY < B_X*filtersPerThread &&*/ (!checkCaseBounds || caseIdx + loadX < numImages)) { #pragma unroll for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_X*filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X*filtersPerThread) { shHidActs[loadY+y][loadX]= hidActs[caseIdx + y * numImages * numModules + m * numImages]; } } } #pragma unroll for (int pp = 0; pp < pixelsPerThread; pp += pixelCache) { //if (loadY < B_Y * pixelCache) { // This condition is not necessary for correctness, but it speeds things a bit /* * As long as B_Y * B_X is divisible by preloadCases this will loop the right * number of times. * * This will load some imgGrads from filter pixels that don't exit (it'll set those to 0), * but the code does not produce any output for those pixels (see last lines). */ #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) { const int pxIdx = pp * B_Y + loadY + y; // pixel idx in filter if (pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) { const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride; if (pixIdx >= 0) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = images[caseIdx + c * imgPixels * imgStride + pixIdx]; } } else { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0; } } } else { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX]= 0; } } } } //} __syncthreads(); #pragma unroll for (int i = 0; i < preloadCases; i++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int p = 0; p < pixelCache; p++) { #pragma unroll for (int c = 0; c < numColors; c++) { prod[c][pp + p][f] += shImages[threadIdx.y + p * B_Y + c * pixelCache * B_Y][i] * shHidActs[threadIdx.x + f * B_X][i]; } } } } __syncthreads(); } } } if (scale) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][p][f]; } } } } } else { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][p][f]; } } } } } } /* * Each block computes weight gradients for 1 pixel, B_Y * colorsPerThread colors and B_X * filtersPerThread filters * threadIdx.x determines filter * threadIdx.y determines color * * blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum * blockIdx.y determines color batch of B_Y * colorsPerThread * blockIdx.z determines pixel 
in filter * NOTE: blockIdx.z is limited to values < 2^16. This means that this routine will * fail for filters >= 256*256. I'm assuming I won't ever use such large filters. * images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters) * B_X * B_Y must be divisible by preloadCases */ template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale> __global__ void conv_weight_acts_mc_mf_kepler(float* images, float* hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int numImgColors, const int numGroups, const int partialSum, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases __shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X * filtersPerThread); const int outputModuleIdx = blockIdx.x / numFilterBlocks; const int moduleIdx = partialSum * outputModuleIdx; const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks); const int numModules = numModulesY * numModulesX; const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numFilterColors = numImgColors / numGroups; const int blockPixelOffset = blockIdx.z; // pixel idx in filter const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize; const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread; const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors; images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX; hidActs += blockFilterIdx * numImages * numModules + loadY * numImages * numModules + loadX; targets += outputModuleIdx * numFilters * filterPixels * numFilterColors + (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.x; //if (blockIdx.x != 0 || blockIdx.y != 0 || blockIdx.z != 0) return; float* shHidActLoad = &shHidActs[loadY][loadX]; float* shImgLoad = &shImages[loadY][loadX]; float prod[colorsPerThread][filtersPerThread]; #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[c][f] = 0; } } for (int m = moduleIdx; m < moduleIdx + partialSum; m++) { const int imgLoadModPosY = paddingStart + (m / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride; const int pxY = imgLoadModPosY + blockPixelY; // pixel x,y coords in image const int pxX = imgLoadModPosX + blockPixelX; const int pixIdx = (pxY * imgSizeX + pxX) * imgStride; // pixel idx in image if (pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX) { for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { // Checking this condition actually makes things faster ... 
:/ // So I've removed the !checkCaseBounds flag and just check it all the time. if (caseIdx + loadX < numImages) { /* * As long as B_Y * B_X is divisible by preloadCases this will loop the right * number of times. * * This will load some images from filter pixels that don't exist (it'll set those to 0), * but the code does not produce any output for those pixels (see last lines). */ if (loadY < B_Y * colorsPerThread) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y*colorsPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y*colorsPerThread) { shImgLoad[(y) * preloadCases] = images[caseIdx + y * imgPixels * imgStride + pixIdx]; } } } if (loadY < B_X * filtersPerThread) { #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_X * filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X * filtersPerThread) { shHidActLoad[y * (preloadCases + 1)] = hidActs[caseIdx + y * numImages * numModules + m * numImages]; } } } } else { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y*colorsPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y*colorsPerThread) { shImgLoad[(y) * preloadCases] = 0; } } #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_X * filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X * filtersPerThread) { shHidActLoad[y * (preloadCases + 1)] = 0; } } } __syncthreads(); #pragma unroll for (int i = 0; i < preloadCases; i++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[c][f] += shImages[threadIdx.y + c * B_Y][i] * shHidActs[threadIdx.x + f * B_X][i]; } } } __syncthreads(); } } } if (scale) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][f]; } } } else { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][f]; } } } } /* * Each block computes weight gradients for 1 pixel, B_Y * colorsPerThread colors and B_X * filtersPerThread filters * threadIdx.x determines filter * threadIdx.y determines color * * blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum * blockIdx.y determines color batch of B_Y * colorsPerThread * blockIdx.z determines pixel in filter * NOTE: blockIdx.z is limited to values < 2^16. This means that this routine will * fail for filters >= 256*256. I'm assuming I won't ever use such large filters. 
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters) * B_X * B_Y must be divisible by preloadCases */ template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale> __global__ void conv_weight_acts_mc_mf_kepler_sw(float* images, float* hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int numImgColors, const int numGroups, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases __shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X * filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks); const int numModules = numModulesY * numModulesX; const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numFilterColors = numImgColors / numGroups; const int blockPixelOffset = blockIdx.z; // pixel idx in filter const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize; const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread; const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors; images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX; hidActs += blockFilterIdx * numImages * numModules + loadY * numImages * numModules + loadX; targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors + (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.x; //if (blockIdx.x != 0 || blockIdx.y != 0 || blockIdx.z != 0) return; const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride)); const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride)); const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride))); const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride))); // if (mStartY == mEndY || mStartX == mEndX) { // return; // } float* shHidActLoad = &shHidActs[loadY][loadX]; float* shImgLoad = &shImages[loadY][loadX]; float prod[colorsPerThread][filtersPerThread]; #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < 
filtersPerThread; f++) { prod[c][f] = 0; } } /* * Note; iterating this way is about 1% slower and uses a few more registers than iterating * over the modules linearly. But it's consistent with the preload routines, * so I'm using it. */ for (int my = mStartY; my < mEndY; my++) { const int imgLoadModPosY = paddingStart + my * moduleStride; const int pxY = imgLoadModPosY + blockPixelY; // pixel x,y coords in image for (int mx = mStartX; mx < mEndX; mx++) { const int m = my * numModulesX + mx; const int imgLoadModPosX = paddingStart + mx * moduleStride; const int pxX = imgLoadModPosX + blockPixelX; const int pixIdx = (pxY * imgSizeX + pxX) * imgStride; // pixel idx in image for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { // Checking this condition actually makes things faster ... :/ // So I've removed the !checkCaseBounds flag and just check it all the time. if (caseIdx + loadX < numImages) { /* * As long as B_Y * B_X is divisible by preloadCases this will loop the right * number of times. * * This will load some images from filter pixels that don't exist (it'll set those to 0), * but the code does not produce any output for those pixels (see last lines). */ if (loadY < B_Y * colorsPerThread) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y*colorsPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y*colorsPerThread) { shImgLoad[(y) * preloadCases] = images[caseIdx + y * imgPixels * imgStride + pixIdx]; } } } if (loadY < B_X * filtersPerThread) { #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_X * filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X * filtersPerThread) { shHidActLoad[y * (preloadCases + 1)] = hidActs[caseIdx + y * numImages * numModules + m * numImages]; } } } } else { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y*colorsPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y*colorsPerThread) { shImgLoad[(y) * preloadCases] = 0; } } #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_X * filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X * filtersPerThread) { shHidActLoad[y * (preloadCases + 1)] = 0; } } } __syncthreads(); #pragma unroll for (int i = 0; i < preloadCases; i++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[c][f] += shImages[threadIdx.y + c * B_Y][i] * shHidActs[threadIdx.x + f * B_X][i]; } } } __syncthreads(); } } } if (scale) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][f]; } } } else { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][f]; } } } } /* * Each block 
computes weight gradients for B_Y * pixelsPerThread pixels and B_X filters * threadIdx.x determines filter * threadIdx.y determines pixel in filter * * blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum * blockIdx.y determines pixel batch of B_Y * pixelsPerThread * * Number of filters must be divisible by B_X * filtersPerThread * Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false. * * images: (numColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numColors, filterPixels, numFilters) * * B_Y * B_X should be divisible by preloadCases. * preloadCases one of 16, 32. * B_X one of 4, 8, 16, 32 * B_Y arbitrary (satisfying divisibility constraints) * numModules must be divisible by partialSum * pixelsPerThread must be divisible by pixelCache * * After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)... * so the compiler is messing up here somehow. It's unable to optimize that case away. */ template <int B_Y, int B_X, int pixelCache, int pixelsPerThread, int filtersPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds> __global__ void conv_weight_acts_c_kepler_sw(float* images, float* hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[pixelCache * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels __shared__ float shHidActs[B_X * filtersPerThread][preloadCases + 1]; // preload preloadCases cases of B_X hidActs const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X*filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; const int blockFilterIdx = B_X * filtersPerThread* (blockIdx.x % numFilterBlocks); // const int moduleStride = (imgSize - filterSize + 1) / numModulesX; const int numModules = numModulesY * numModulesX; const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread; images += loadX; hidActs += blockFilterIdx * numImages * numModules // + loadY * numImages * numModules + loadX; targets += (blockModuleChunkIdx * numFilters) * filterPixels * numColors + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.y * numFilters + threadIdx.x; //float* shImgLoad = &shImages[loadY][loadX]; //float* shHidActLoad = &shHidActs[loadY][loadX]; float prod[numColors][pixelsPerThread][filtersPerThread]; #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[c][p][f] = 0; } } } const int mStartX = 
blockModuleStartX; const int mStartY = blockModuleStartY; const int mEndX = min(numModulesX, blockModuleStartX + sumWidth); const int mEndY = min(numModulesY, blockModuleStartY + sumWidth); // if (mStartY == mEndY || mStartX == mEndX) { // return; // } const int fYOff = (blockPixelOffset + tidx) / filterSize; const int fXOff = (blockPixelOffset + tidx) % filterSize; __shared__ int pxIdxes[B_Y*pixelsPerThread]; for (int my = mStartY; my < mEndY; my++) { const int imgLoadModPosY = paddingStart + my * moduleStride; for (int mx = mStartX; mx < mEndX; mx++) { const int m = my * numModulesX + mx; __syncthreads(); const int imgLoadModPosX = paddingStart + mx * moduleStride; if (tidx < B_Y * pixelsPerThread) { // const int imgLoadModPosY = paddingStart + my * moduleStride; // const int imgLoadModPosX = paddingStart + mx * moduleStride; int pxY = (imgLoadModPosY + fYOff); int pxX = (imgLoadModPosX + fXOff); int pixIdx = (pxY * imgSizeX + pxX) * imgStride; pxIdxes[tidx] = pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX ? pixIdx : -1; } __syncthreads(); for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { if (/*loadY < B_X*filtersPerThread &&*/ (!checkCaseBounds || caseIdx + loadX < numImages)) { #pragma unroll for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) { const int fIdx = ((loadY + y) % filtersPerThread) * B_X + (loadY + y) / filtersPerThread; // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_X*filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || loadY+y < B_X*filtersPerThread) { shHidActs[loadY+y][loadX]= hidActs[caseIdx + fIdx * numImages * numModules + m * numImages]; } } } else { #pragma unroll for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) { // const int fIdx = ((loadY + y) % filtersPerThread) * B_X + (loadY + y) / filtersPerThread; // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_X*filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || loadY+y < B_X*filtersPerThread) { shHidActs[loadY+y][loadX] = 0; } } } #pragma unroll for (int pp = 0; pp < pixelsPerThread; pp += pixelCache) { //if (loadY < B_Y * pixelCache) { // This condition is not necessary for correctness, but it speeds things a bit /* * As long as B_Y * B_X is divisible by preloadCases this will loop the right * number of times. * * This will load some imgGrads from filter pixels that don't exit (it'll set those to 0), * but the code does not produce any output for those pixels (see last lines). 
*/ #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) { const int pxIdx = pp * B_Y + loadY + y; // pixel idx in filter if (pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) { const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride; if (pixIdx >= 0) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = images[caseIdx + c * imgPixels * imgStride + pixIdx]; } } else { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0; } } } else { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX]= 0; } } } } //} __syncthreads(); #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int i = 0; i < preloadCases; i++) { #pragma unroll for (int p = 0; p < pixelCache; p++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[c][pp + p][f] += shImages[threadIdx.y + p * B_Y + c * pixelCache * B_Y][i] * shHidActs[threadIdx.x * filtersPerThread + f][i]; } } } } __syncthreads(); } } } } if (scale) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][p][f]; } } } } } else { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][p][f]; } } } } } } #define WA_C3_LOOP(pp, c) _Pragma("unroll") \ for (int i = 0; i < preloadCases; i++) { \ _Pragma("unroll") \ for (int p = 0; p < pixelCache; p++) { \ _Pragma("unroll") \ for (int f = 0; f < filtersPerThread; f++) { \ prod[c][(pp) + p][f] += shImages[threadIdx.y + p * B_Y + (c) * pixelCache * B_Y][i] * shHidActs[threadIdx.x * filtersPerThread + f][i]; \ } \ } \ } #define WA_C3_LOOP2(pp) _Pragma("unroll") \ for (int p = 0; p < pixelCache; p++) { \ _Pragma("unroll") \ for (int i = 0; i < preloadCases; i++) { \ _Pragma("unroll") \ for (int f = 0; f < filtersPerThread; f++) { \ _Pragma("unroll") \ for (int c = 0; c < 3; ++c) { \ prod[c][(pp) + p][f] += shImages[threadIdx.y + p * B_Y + (c) * pixelCache * B_Y][i] * shHidActs[threadIdx.x * filtersPerThread + f][i]; \ } \ } \ } \ } #define WA_3_FIDX(y) (((loadY + (y)*B_X*B_Y/preloadCases) % filtersPerThread) * B_X + (loadY + (y)*B_X*B_Y/preloadCases) / filtersPerThread) /* * Each block computes weight gradients for B_Y * pixelsPerThread pixels and B_X filters * threadIdx.x determines filter * threadIdx.y determines pixel in filter * * blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum * blockIdx.y determines pixel batch of B_Y * pixelsPerThread * * Number of filters must be divisible by B_X * filtersPerThread * Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false. 
* * images: (numColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numColors, filterPixels, numFilters) * * B_Y * B_X should be divisible by preloadCases. * preloadCases one of 16, 32. * B_X one of 4, 8, 16, 32 * B_Y arbitrary (satisfying divisibility constraints) * numModules must be divisible by partialSum * pixelsPerThread must be divisible by pixelCache * * After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)... * so the compiler is messing up here somehow. It's unable to optimize that case away. */ template <int B_Y, int B_X, int pixelCache, int pixelsPerThread, int filtersPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds> //__launch_bounds__(256,2) __global__ void conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3_tex(hipTextureObject_t images, hipTextureObject_t hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[pixelCache * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels __shared__ float shHidActs[B_X * filtersPerThread][preloadCases + 1]; // preload preloadCases cases of B_X hidActs const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X*filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; const int blockFilterIdx = B_X * filtersPerThread* (blockIdx.x % numFilterBlocks); // const int moduleStride = (imgSize - filterSize + 1) / numModulesX; const int numModules = numModulesY * numModulesX; const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread; const int imgOffset = loadX; const int hidActsOffset = blockFilterIdx * numImages * numModules + loadX; // images += loadX; // hidActs += blockFilterIdx * numImages * numModules // + loadX; targets += (blockModuleChunkIdx * numFilters) * filterPixels * numColors + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.y * numFilters + threadIdx.x; //float* shImgLoad = &shImages[loadY][loadX]; //float* shHidActLoad = &shHidActs[loadY][loadX]; float prod[numColors][pixelsPerThread][filtersPerThread]; #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[c][p][f] = 0; } } } const int mStartX = blockModuleStartX; const int mStartY = blockModuleStartY; const int mEndX = min(numModulesX, blockModuleStartX + sumWidth); const int mEndY = min(numModulesY, blockModuleStartY + sumWidth); const bool doWork = mStartY < mEndY && mStartX < mEndX; // if (!doWork) { // hidActs -= // } // if (mStartY == mEndY || 
mStartX == mEndX) { // return; // } // float imPreload[pixelCache * numColors * preloadCases / B_X]; // [12] float haPreload[filtersPerThread * preloadCases / B_Y]; // [8] // if (blockIdx.x != 0 || blockIdx.y !=0) { // return; // } // printf("mStartX: %d, mStartX: %d, mStartX: %d, mStartX: %d\n", mStartX, mStartY, mEndX, mEndY); const int fYOff = (blockPixelOffset + tidx) / filterSize; const int fXOff = (blockPixelOffset + tidx) % filterSize; __shared__ int pxIdxes[B_Y*pixelsPerThread]; // __shared__ int fidx[filtersPerThread * preloadCases / B_Y]; // [8] int m = mStartY * numModulesX + mStartX; int fidx[filtersPerThread * preloadCases / B_Y]; if (doWork) { #pragma unroll for (int y = 0; y < filtersPerThread * preloadCases / B_Y; ++y) { const int fIdx = WA_3_FIDX(y); // if (doWork) { haPreload[y] = tex1Dfetch<float>(hidActs, hidActsOffset + fIdx * numImages * numModules + m * numImages); // } fidx[y] = fIdx * numImages * numModules; } } for (int my = mStartY; my < mEndY; my++) { const int imgLoadModPosY = paddingStart + my * moduleStride; for (int mx = mStartX; mx < mEndX; mx++) { m = my * numModulesX + mx; // __syncthreads(); const int imgLoadModPosX = paddingStart + mx * moduleStride; if (tidx < B_Y * pixelsPerThread) { // const int imgLoadModPosY = paddingStart + my * moduleStride; // const int imgLoadModPosX = paddingStart + mx * moduleStride; const int pxY = (imgLoadModPosY + fYOff); const int pxX = (imgLoadModPosX + fXOff); const int pixIdx = (pxY * imgSizeX + pxX) * imgStride; pxIdxes[tidx] = pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX ? pixIdx : -1; } __syncthreads(); int myNext = my, mxNext = mx, mNext = m; const bool lastModule = my == mEndY - 1 && mx == mEndX - 1; if (!lastModule) { mxNext = mx + 1 == mEndX ? mStartX : mx + 1; myNext = my + (mx + 1 == mEndX); mNext = myNext * numModulesX + mxNext; } for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { const bool lastBatch = caseIdx + preloadCases == numImages; // const float* im = &images[caseIdx + preloadCases + pixIdx]; // const float* ha = &hidActs[caseIdx + preloadCases + m * numImages]; int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages; if (lastBatch) { // ha = &hidActs[mNext * numImages]; hidActsOffset2 = hidActsOffset + mNext * numImages; } #pragma unroll for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) { shHidActs[loadY+y][loadX] = haPreload[y*preloadCases/(B_X*B_Y)]; } /* ================================================================================== * Iteration 0 * ================================================================================== */ #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0; } } #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { const int pxIdx = 0 * B_Y + loadY + y; // pixel idx in filter if (pxIdx + blockPixelOffset < filterPixels) { const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride; if (pixIdx >= 0) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = tex1Dfetch<float>(images, imgOffset + caseIdx + c * imgPixels * imgStride + pixIdx); } } } } __syncthreads(); haPreload[0] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[0]); haPreload[1] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[1]); WA_C3_LOOP(0,0); haPreload[2] = tex1Dfetch<float>(hidActs, 
hidActsOffset2 + fidx[2]); haPreload[3] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[3]); WA_C3_LOOP(0,1); haPreload[4] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[4]); haPreload[5] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[5]); WA_C3_LOOP(0,2); haPreload[6] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[6]); haPreload[7] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[7]); __syncthreads(); } } } if (scale) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][p][f]; } } } } } else { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { // if (threadIdx.x == 3) targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][p][f]; } } } } } } /* * Each block computes weight gradients for B_Y * pixelsPerThread pixels and B_X filters * threadIdx.x determines filter * threadIdx.y determines pixel in filter * * blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum * blockIdx.y determines pixel batch of B_Y * pixelsPerThread * * Number of filters must be divisible by B_X * filtersPerThread * Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false. * * images: (numColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numColors, filterPixels, numFilters) * * B_Y * B_X should be divisible by preloadCases. * preloadCases one of 16, 32. * B_X one of 4, 8, 16, 32 * B_Y arbitrary (satisfying divisibility constraints) * numModules must be divisible by partialSum * pixelsPerThread must be divisible by pixelCache * * After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)... * so the compiler is messing up here somehow. It's unable to optimize that case away. 
*/ template <int B_Y, int B_X, int pixelCache, int pixelsPerThread, int filtersPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds> //__launch_bounds__(256,2) __global__ void conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3(float* images, float* hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[pixelCache * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels __shared__ float shHidActs[B_X * filtersPerThread][preloadCases + 1]; // preload preloadCases cases of B_X hidActs const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X*filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; const int blockFilterIdx = B_X * filtersPerThread* (blockIdx.x % numFilterBlocks); // const int moduleStride = (imgSize - filterSize + 1) / numModulesX; const int numModules = numModulesY * numModulesX; const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread; const int imgOffset = loadX; const int hidActsOffset = blockFilterIdx * numImages * numModules + loadX; // images += loadX; // hidActs += blockFilterIdx * numImages * numModules // + loadX; targets += (blockModuleChunkIdx * numFilters) * filterPixels * numColors + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.y * numFilters + threadIdx.x; //float* shImgLoad = &shImages[loadY][loadX]; //float* shHidActLoad = &shHidActs[loadY][loadX]; float prod[numColors][pixelsPerThread][filtersPerThread]; #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[c][p][f] = 0; } } } const int mStartX = blockModuleStartX; const int mStartY = blockModuleStartY; const int mEndX = min(numModulesX, blockModuleStartX + sumWidth); const int mEndY = min(numModulesY, blockModuleStartY + sumWidth); const bool doWork = mStartY < mEndY && mStartX < mEndX; // if (!doWork) { // hidActs -= // } // if (mStartY == mEndY || mStartX == mEndX) { // return; // } // float imPreload[pixelCache * numColors * preloadCases / B_X]; // [12] float haPreload[filtersPerThread * preloadCases / B_Y]; // [8] // if (blockIdx.x != 0 || blockIdx.y !=0) { // return; // } // printf("mStartX: %d, mStartX: %d, mStartX: %d, mStartX: %d\n", mStartX, mStartY, mEndX, mEndY); const int fYOff = (blockPixelOffset + tidx) / filterSize; const int fXOff = (blockPixelOffset + tidx) % filterSize; __shared__ int pxIdxes[B_Y*pixelsPerThread]; // __shared__ int fidx[filtersPerThread * preloadCases / B_Y]; // [8] int m = mStartY * numModulesX + mStartX; int fidx[filtersPerThread * preloadCases / B_Y]; if (doWork) { #pragma unroll for (int y = 
0; y < filtersPerThread * preloadCases / B_Y; ++y) { const int fIdx = WA_3_FIDX(y); // if (doWork) { haPreload[y] = hidActs[hidActsOffset + fIdx * numImages * numModules + m * numImages]; // } fidx[y] = fIdx * numImages * numModules; } } for (int my = mStartY; my < mEndY; my++) { const int imgLoadModPosY = paddingStart + my * moduleStride; for (int mx = mStartX; mx < mEndX; mx++) { m = my * numModulesX + mx; // __syncthreads(); const int imgLoadModPosX = paddingStart + mx * moduleStride; if (tidx < B_Y * pixelsPerThread) { // const int imgLoadModPosY = paddingStart + my * moduleStride; // const int imgLoadModPosX = paddingStart + mx * moduleStride; const int pxY = (imgLoadModPosY + fYOff); const int pxX = (imgLoadModPosX + fXOff); const int pixIdx = (pxY * imgSizeX + pxX) * imgStride; pxIdxes[tidx] = pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX ? pixIdx : -1; } __syncthreads(); int myNext = my, mxNext = mx, mNext = m; const bool lastModule = my == mEndY - 1 && mx == mEndX - 1; if (!lastModule) { mxNext = mx + 1 == mEndX ? mStartX : mx + 1; myNext = my + (mx + 1 == mEndX); mNext = myNext * numModulesX + mxNext; } for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { const bool lastBatch = caseIdx + preloadCases == numImages; // const float* im = &images[caseIdx + preloadCases + pixIdx]; // const float* ha = &hidActs[caseIdx + preloadCases + m * numImages]; int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages; if (lastBatch) { // ha = &hidActs[mNext * numImages]; hidActsOffset2 = hidActsOffset + mNext * numImages; } #pragma unroll for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) { shHidActs[loadY+y][loadX] = haPreload[y*preloadCases/(B_X*B_Y)]; } /* ================================================================================== * Iteration 0 * ================================================================================== */ #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0; } } #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { const int pxIdx = 0 * B_Y + loadY + y; // pixel idx in filter if (pxIdx + blockPixelOffset < filterPixels) { const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride; if (pixIdx >= 0) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = images[imgOffset + caseIdx + c * imgPixels * imgStride + pixIdx]; } } } } __syncthreads(); haPreload[0] = hidActs[hidActsOffset2 + fidx[0]]; haPreload[1] = hidActs[hidActsOffset2 + fidx[1]]; WA_C3_LOOP(0,0); haPreload[2] = hidActs[hidActsOffset2 + fidx[2]]; haPreload[3] = hidActs[hidActsOffset2 + fidx[3]]; WA_C3_LOOP(0,1); haPreload[4] = hidActs[hidActsOffset2 + fidx[4]]; haPreload[5] = hidActs[hidActsOffset2 + fidx[5]]; WA_C3_LOOP(0,2); haPreload[6] = hidActs[hidActsOffset2 + fidx[6]]; haPreload[7] = hidActs[hidActsOffset2 + fidx[7]]; __syncthreads(); } } } if (scale) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][p][f]; } } } } } else { 
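            // scale == false: overwrite the weight gradients with the sums accumulated in prod[][][],
            // scaled only by scaleOutputs (no read-modify-write of the existing targets values).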
#pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { // if (threadIdx.x == 3) targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][p][f]; } } } } } } /* * Each block computes weight gradients for B_Y * pixelsPerThread pixels and B_X filters * threadIdx.x determines filter * threadIdx.y determines pixel in filter * * blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum * blockIdx.y determines pixel batch of B_Y * pixelsPerThread * * Number of filters must be divisible by B_X * filtersPerThread * Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false. * * images: (numColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numColors, filterPixels, numFilters) * * B_Y * B_X should be divisible by preloadCases. * preloadCases one of 16, 32. * B_X one of 4, 8, 16, 32 * B_Y arbitrary (satisfying divisibility constraints) * numModules must be divisible by partialSum * pixelsPerThread must be divisible by pixelCache * * After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)... * so the compiler is messing up here somehow. It's unable to optimize that case away. */ template <int B_Y, int B_X, int pixelCache, int pixelsPerThread, int filtersPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds> __launch_bounds__(256,2) __global__ void conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3_tex(hipTextureObject_t images, hipTextureObject_t hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[pixelCache * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels __shared__ float shHidActs[B_X * filtersPerThread][preloadCases + 1]; // preload preloadCases cases of B_X hidActs const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X*filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; const int blockFilterIdx = B_X * filtersPerThread* (blockIdx.x % numFilterBlocks); // const int moduleStride = (imgSize - filterSize + 1) / numModulesX; const int numModules = numModulesY * numModulesX; const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread; const int imgOffset = loadX; const int hidActsOffset = blockFilterIdx * numImages * numModules + loadX; // images += loadX; // hidActs += blockFilterIdx * numImages * numModules // + 
loadX; targets += (blockModuleChunkIdx * numFilters) * filterPixels * numColors + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.y * numFilters + threadIdx.x; //float* shImgLoad = &shImages[loadY][loadX]; //float* shHidActLoad = &shHidActs[loadY][loadX]; float prod[numColors][pixelsPerThread][filtersPerThread]; #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[c][p][f] = 0; } } } const int mStartX = blockModuleStartX; const int mStartY = blockModuleStartY; const int mEndX = min(numModulesX, blockModuleStartX + sumWidth); const int mEndY = min(numModulesY, blockModuleStartY + sumWidth); const bool doWork = mStartY < mEndY && mStartX < mEndX; // if (mStartY == mEndY || mStartX == mEndX) { // return; // } // float imPreload[pixelCache * numColors * preloadCases / B_X]; // [12] float haPreload[filtersPerThread * preloadCases / B_Y]; // [6] // if (blockIdx.x != 0 || blockIdx.y !=0) { // return; // } // printf("mStartX: %d, mStartX: %d, mStartX: %d, mStartX: %d\n", mStartX, mStartY, mEndX, mEndY); const int fYOff = (blockPixelOffset + tidx) / filterSize; const int fXOff = (blockPixelOffset + tidx) % filterSize; __shared__ int pxIdxes[B_Y*pixelsPerThread]; // __shared__ int fidx[filtersPerThread * preloadCases / B_Y]; // [6] int m = mStartY * numModulesX + mStartX; int fidx[filtersPerThread * preloadCases / B_Y]; // if (doWork) { #pragma unroll for (int y = 0; y < filtersPerThread * preloadCases / B_Y; ++y) { fidx[y] = WA_3_FIDX(y) * numImages * numModules; if (doWork) { // Not actually necessary, I think haPreload[y] = tex1Dfetch<float>(hidActs, hidActsOffset + fidx[y] + m * numImages); } } // } int mNext = mStartY * numModulesX + mStartX; for (int my = mStartY; my < mEndY; my++) { // const int imgLoadModPosY = paddingStart + my * moduleStride; for (int mx = mStartX; mx < mEndX; mx++) { m = mNext;//my * numModulesX + mx; // __syncthreads(); // const int imgLoadModPosX = paddingStart + mx * moduleStride; if (tidx < B_Y * pixelsPerThread) { const int imgLoadModPosY = paddingStart + my * moduleStride; const int imgLoadModPosX = paddingStart + mx * moduleStride; const int pxY = (imgLoadModPosY + fYOff); const int pxX = (imgLoadModPosX + fXOff); const int pixIdx = (pxY * imgSizeX + pxX) * imgStride; pxIdxes[tidx] = pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX ? pixIdx : -1; } __syncthreads(); const bool lastModule = my == mEndY - 1 && mx == mEndX - 1; mNext = lastModule * m + !lastModule * ((my + (mx + 1 == mEndX)) * numModulesX + (mx + 1 == mEndX ? mStartX : mx + 1)); // if (!lastModule) { // const int mxNext = mx + 1 == mEndX ? 
mStartX : mx + 1; // const int myNext = my + (mx + 1 == mEndX); // mNext = myNext * numModulesX + mxNext; // } for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { const bool lastBatch = caseIdx + preloadCases == numImages; // const float* im = &images[caseIdx + preloadCases + pixIdx]; // const float* ha = hidActs + !lastBatch * (caseIdx + preloadCases + m * numImages) + lastBatch * mNext * numImages; const int hidActsOffset2 = hidActsOffset + !lastBatch * (caseIdx + preloadCases + m * numImages) + lastBatch * mNext * numImages; // if (lastBatch) { // ha = &hidActs[mNext * numImages]; // } #pragma unroll for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) { shHidActs[loadY+y][loadX] = haPreload[y*preloadCases/(B_X*B_Y)]; } /* ================================================================================== * Iteration 0 * ================================================================================== */ #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0; } } } #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) { const int pxIdx = 0 * B_Y + loadY + y; // pixel idx in filter const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride; if (pixIdx >= 0 && pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = tex1Dfetch<float>(images, imgOffset + caseIdx + c * imgPixels * imgStride + pixIdx); } } } } __syncthreads(); haPreload[0] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[0]); haPreload[1] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[1]); haPreload[2] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[2]); haPreload[3] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[3]); haPreload[4] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[4]); haPreload[5] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[5]); WA_C3_LOOP2(0); __syncthreads(); /* ================================================================================== * Iteration 1 * ================================================================================== */ #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) { // const int pxIdx = 2 * B_Y + loadY + y; // pixel idx in filter #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0; } } } #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) { const int pxIdx = 2 * B_Y + loadY + y; // pixel idx in filter const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + 
pxX) * imgStride; if (pixIdx >= 0 && pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = tex1Dfetch<float>(images, imgOffset + caseIdx + c * imgPixels * imgStride + pixIdx); } } } } __syncthreads(); WA_C3_LOOP2(2); __syncthreads(); } } } if (scale) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][p][f]; } } } } } else { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][p][f]; } } } } } } /* * Each block computes weight gradients for B_Y * pixelsPerThread pixels and B_X filters * threadIdx.x determines filter * threadIdx.y determines pixel in filter * * blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum * blockIdx.y determines pixel batch of B_Y * pixelsPerThread * * Number of filters must be divisible by B_X * filtersPerThread * Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false. * * images: (numColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numColors, filterPixels, numFilters) * * B_Y * B_X should be divisible by preloadCases. * preloadCases one of 16, 32. * B_X one of 4, 8, 16, 32 * B_Y arbitrary (satisfying divisibility constraints) * numModules must be divisible by partialSum * pixelsPerThread must be divisible by pixelCache * * After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)... * so the compiler is messing up here somehow. It's unable to optimize that case away. 
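 *
 * Illustrative sketch (not from the original header): a host-side launch configuration consistent
 * with the indexing in this kernel, assuming B_X = 16, B_Y = 16, pixelsPerThread = 4 and
 * filtersPerThread = 3; the dispatcher's actual choices may differ.
 *
 *     int numFilterBlocks = numFilters / (B_X * filtersPerThread);
 *     int numModuleChunks = DIVUP(numModulesY, sumWidth) * DIVUP(numModulesX, sumWidth);
 *     dim3 threads(B_X, B_Y);
 *     dim3 blocks(numModuleChunks * numFilterBlocks,
 *                 DIVUP(filterSize * filterSize, B_Y * pixelsPerThread));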
*/ template <int B_Y, int B_X, int pixelCache, int pixelsPerThread, int filtersPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds> __launch_bounds__(256,2) __global__ void conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3(float* images, float* hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[pixelCache * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels __shared__ float shHidActs[B_X * filtersPerThread][preloadCases + 1]; // preload preloadCases cases of B_X hidActs const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X*filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; const int blockFilterIdx = B_X * filtersPerThread* (blockIdx.x % numFilterBlocks); // const int moduleStride = (imgSize - filterSize + 1) / numModulesX; const int numModules = numModulesY * numModulesX; const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread; const int imgOffset = loadX; const int hidActsOffset = blockFilterIdx * numImages * numModules + loadX; // images += loadX; // hidActs += blockFilterIdx * numImages * numModules // + loadX; targets += (blockModuleChunkIdx * numFilters) * filterPixels * numColors + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.y * numFilters + threadIdx.x; //float* shImgLoad = &shImages[loadY][loadX]; //float* shHidActLoad = &shHidActs[loadY][loadX]; float prod[numColors][pixelsPerThread][filtersPerThread]; #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[c][p][f] = 0; } } } const int mStartX = blockModuleStartX; const int mStartY = blockModuleStartY; const int mEndX = min(numModulesX, blockModuleStartX + sumWidth); const int mEndY = min(numModulesY, blockModuleStartY + sumWidth); const bool doWork = mStartY < mEndY && mStartX < mEndX; // if (mStartY == mEndY || mStartX == mEndX) { // return; // } // float imPreload[pixelCache * numColors * preloadCases / B_X]; // [12] float haPreload[filtersPerThread * preloadCases / B_Y]; // [6] // if (blockIdx.x != 0 || blockIdx.y !=0) { // return; // } // printf("mStartX: %d, mStartX: %d, mStartX: %d, mStartX: %d\n", mStartX, mStartY, mEndX, mEndY); const int fYOff = (blockPixelOffset + tidx) / filterSize; const int fXOff = (blockPixelOffset + tidx) % filterSize; __shared__ int pxIdxes[B_Y*pixelsPerThread]; // __shared__ int fidx[filtersPerThread * preloadCases / B_Y]; // [6] int m = mStartY * numModulesX + mStartX; int fidx[filtersPerThread * preloadCases / B_Y]; // if (doWork) { #pragma unroll for (int y = 0; y < filtersPerThread * 
preloadCases / B_Y; ++y) { fidx[y] = WA_3_FIDX(y) * numImages * numModules; if (doWork) { // Not actually necessary, I think haPreload[y] = hidActs[hidActsOffset + fidx[y] + m * numImages]; } } // } int mNext = mStartY * numModulesX + mStartX; for (int my = mStartY; my < mEndY; my++) { // const int imgLoadModPosY = paddingStart + my * moduleStride; for (int mx = mStartX; mx < mEndX; mx++) { m = mNext;//my * numModulesX + mx; // __syncthreads(); // const int imgLoadModPosX = paddingStart + mx * moduleStride; if (tidx < B_Y * pixelsPerThread) { const int imgLoadModPosY = paddingStart + my * moduleStride; const int imgLoadModPosX = paddingStart + mx * moduleStride; const int pxY = (imgLoadModPosY + fYOff); const int pxX = (imgLoadModPosX + fXOff); const int pixIdx = (pxY * imgSizeX + pxX) * imgStride; pxIdxes[tidx] = pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX ? pixIdx : -1; } __syncthreads(); const bool lastModule = my == mEndY - 1 && mx == mEndX - 1; mNext = lastModule * m + !lastModule * ((my + (mx + 1 == mEndX)) * numModulesX + (mx + 1 == mEndX ? mStartX : mx + 1)); // if (!lastModule) { // const int mxNext = mx + 1 == mEndX ? mStartX : mx + 1; // const int myNext = my + (mx + 1 == mEndX); // mNext = myNext * numModulesX + mxNext; // } for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { const bool lastBatch = caseIdx + preloadCases == numImages; // const float* im = &images[caseIdx + preloadCases + pixIdx]; // const float* ha = hidActs + !lastBatch * (caseIdx + preloadCases + m * numImages) + lastBatch * mNext * numImages; const int hidActsOffset2 = hidActsOffset + !lastBatch * (caseIdx + preloadCases + m * numImages) + lastBatch * mNext * numImages; // if (lastBatch) { // ha = &hidActs[mNext * numImages]; // } #pragma unroll for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) { shHidActs[loadY+y][loadX] = haPreload[y*preloadCases/(B_X*B_Y)]; } /* ================================================================================== * Iteration 0 * ================================================================================== */ #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0; } } } #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) { const int pxIdx = 0 * B_Y + loadY + y; // pixel idx in filter const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride; if (pixIdx >= 0 && pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = images[imgOffset + caseIdx + c * imgPixels * imgStride + pixIdx]; } } } } __syncthreads(); haPreload[0] = hidActs[hidActsOffset2 + fidx[0]]; haPreload[1] = hidActs[hidActsOffset2 + fidx[1]]; haPreload[2] = hidActs[hidActsOffset2 + fidx[2]]; haPreload[3] = hidActs[hidActsOffset2 + fidx[3]]; haPreload[4] = hidActs[hidActsOffset2 + fidx[4]]; haPreload[5] = hidActs[hidActsOffset2 + fidx[5]]; WA_C3_LOOP2(0); __syncthreads(); 
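            // Second pixelCache-sized slab: with pixelCache == 2 and pixelsPerThread == 4, each block's
            // pixel batch is processed in two passes. The pass below covers filter pixels 2*B_Y .. 4*B_Y-1
            // of the batch (hence the "2 * B_Y" offset in pxIdx) and reuses the hidActs already staged
            // in shared memory.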
/* ================================================================================== * Iteration 1 * ================================================================================== */ #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) { // const int pxIdx = 2 * B_Y + loadY + y; // pixel idx in filter #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0; } } } #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) { const int pxIdx = 2 * B_Y + loadY + y; // pixel idx in filter const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride; if (pixIdx >= 0 && pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = images[imgOffset + caseIdx + c * imgPixels * imgStride + pixIdx]; } } } } __syncthreads(); WA_C3_LOOP2(2); __syncthreads(); } } } if (scale) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][p][f]; } } } } } else { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][p][f]; } } } } } } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters) */ template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale> __launch_bounds__(128, 4) __global__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16_tex(hipTextureObject_t images, hipTextureObject_t hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int numImgColors, const int numGroups, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases __shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X * filtersPerThread); const int 
blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; // const int moduleIdx = partialSum * outputModuleIdx; const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks); const int numModules = numModulesY * numModulesX; const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numFilterColors = numImgColors / numGroups; const int blockPixelOffset = blockIdx.z; // pixel idx in filter const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize; const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread; const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors; const int imgOffset = (imgColorIdx + loadY) * imgPixels * imgStride + loadX; // images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX; const int hidActsOffset = blockFilterIdx * numImages * numModules + loadY * numImages * numModules + loadX; // // hidActs += // blockFilterIdx * numImages * numModules // + loadY * numImages * numModules // + loadX; targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors + (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.x; // if (blockIdx.x != 0 || blockIdx.y != 0 || blockIdx.z != 0) return; const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride)); const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride)); const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride))); const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride))); // if (mStartY == mEndY || mStartX == mEndX) { // return; // } // const bool doWork = mStartY < mEndY && mStartX < mEndX; float* shHidActLoad = &shHidActs[loadY][loadX]; float* shImgLoad = &shImages[loadY][loadX]; float imPreload[preloadCases*colorsPerThread/B_X]; // [8] float haPreload[preloadCases*filtersPerThread/B_Y]; // [8] float prod[filtersPerThread][colorsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[f][c] = 0; } } int pixIdx, pixIdxNext, m, mNext; conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( mStartY, mStartX, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdx, m); #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { // It's bizarre, but this is the fastest way I've found to get it not to load nonexistent pixels. // All other ways cause crazy excessive register usage. const int idx = (mStartY < mEndY && mStartX < mEndX) * (0 + y * imgPixels * imgStride + pixIdx); imPreload[y * preloadCases/(B_X * B_Y)] = tex1Dfetch<float>(images, imgOffset + idx); } #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { // Almost certainly not necessary here. 
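            // When this block has no modules to process, the (mStartY < mEndY && mStartX < mEndX) factor
            // collapses the offset to 0, so the speculative preload still reads an in-bounds element
            // instead of introducing a branch.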
const int idx = (mStartY < mEndY && mStartX < mEndX) * (0 + y * numImages * numModules + m * numImages); haPreload[y * preloadCases / (B_X * B_Y)] = tex1Dfetch<float>(hidActs, hidActsOffset + idx); } for (int my = mStartY; my < mEndY; my++) { for (int mx = mStartX; mx < mEndX; mx++) { int myNext = my, mxNext = mx; const bool lastModule = my == mEndY - 1 && mx == mEndX - 1; if (!lastModule) { mxNext = mx + 1 == mEndX ? mStartX : mx + 1; myNext = my + (mx + 1 == mEndX); } conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( myNext, mxNext, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdxNext, mNext); for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { shImgLoad[(y) * preloadCases] = imPreload[y * preloadCases / (B_X * B_Y)]; } // const float* im = &images[caseIdx + preloadCases + pixIdx]; // const float* ha = &hidActs[caseIdx + preloadCases + m * numImages]; int imgOffset2 = imgOffset + caseIdx + preloadCases + pixIdx; int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages; if (caseIdx + preloadCases == numImages) { pixIdx = pixIdxNext; m = mNext; imgOffset2 = imgOffset + pixIdxNext; hidActsOffset2 = hidActsOffset + mNext * numImages; } #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { shHidActLoad[y * (preloadCases + 1)] = haPreload[y * preloadCases / (B_X * B_Y)]; } __syncthreads(); #pragma unroll for (int z = 0; z < 8; ++z) { WA_IMLOAD_TX(z); WA_LOOP2(z); } #pragma unroll for (int z = 0; z < 8; ++z) { WA_HALOAD_TX(z); WA_LOOP2(z+8); } __syncthreads(); } } } if (scale) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[f][c]; } } } else { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[f][c]; } } } } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters) */ template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale> __launch_bounds__(128, 4) __global__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16(float* images, float* hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int numImgColors, const int numGroups, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases __shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X * filtersPerThread); 
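    // blockIdx.x jointly encodes a module chunk (sumWidth x sumWidth modules) and a filter block of
    // B_X * filtersPerThread filters; the two components are unpacked below via division and modulo
    // by numFilterBlocks.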
const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; // const int moduleIdx = partialSum * outputModuleIdx; const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks); const int numModules = numModulesY * numModulesX; const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numFilterColors = numImgColors / numGroups; const int blockPixelOffset = blockIdx.z; // pixel idx in filter const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize; const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread; const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors; const int imgOffset = (imgColorIdx + loadY) * imgPixels * imgStride + loadX; // images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX; const int hidActsOffset = blockFilterIdx * numImages * numModules + loadY * numImages * numModules + loadX; // // hidActs += // blockFilterIdx * numImages * numModules // + loadY * numImages * numModules // + loadX; targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors + (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.x; // if (blockIdx.x != 0 || blockIdx.y != 0 || blockIdx.z != 0) return; const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride)); const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride)); const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride))); const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride))); // if (mStartY == mEndY || mStartX == mEndX) { // return; // } // const bool doWork = mStartY < mEndY && mStartX < mEndX; float* shHidActLoad = &shHidActs[loadY][loadX]; float* shImgLoad = &shImages[loadY][loadX]; float imPreload[preloadCases*colorsPerThread/B_X]; // [8] float haPreload[preloadCases*filtersPerThread/B_Y]; // [8] float prod[filtersPerThread][colorsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[f][c] = 0; } } int pixIdx, pixIdxNext, m, mNext; conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( mStartY, mStartX, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdx, m); #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { // It's bizarre, but this is the fastest way I've found to get it not to load nonexistent pixels. // All other ways cause crazy excessive register usage. const int idx = (mStartY < mEndY && mStartX < mEndX) * (0 + y * imgPixels * imgStride + pixIdx); imPreload[y * preloadCases/(B_X * B_Y)] = images[imgOffset + idx]; } #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { // Almost certainly not necessary here. 
const int idx = (mStartY < mEndY && mStartX < mEndX) * (0 + y * numImages * numModules + m * numImages); haPreload[y * preloadCases / (B_X * B_Y)] = hidActs[hidActsOffset + idx]; } for (int my = mStartY; my < mEndY; my++) { for (int mx = mStartX; mx < mEndX; mx++) { int myNext = my, mxNext = mx; const bool lastModule = my == mEndY - 1 && mx == mEndX - 1; if (!lastModule) { mxNext = mx + 1 == mEndX ? mStartX : mx + 1; myNext = my + (mx + 1 == mEndX); } conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( myNext, mxNext, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdxNext, mNext); for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { shImgLoad[(y) * preloadCases] = imPreload[y * preloadCases / (B_X * B_Y)]; } // const float* im = &images[caseIdx + preloadCases + pixIdx]; // const float* ha = &hidActs[caseIdx + preloadCases + m * numImages]; int imgOffset2 = imgOffset + caseIdx + preloadCases + pixIdx; int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages; if (caseIdx + preloadCases == numImages) { pixIdx = pixIdxNext; m = mNext; imgOffset2 = imgOffset + pixIdxNext; hidActsOffset2 = hidActsOffset + mNext * numImages; } #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { shHidActLoad[y * (preloadCases + 1)] = haPreload[y * preloadCases / (B_X * B_Y)]; } __syncthreads(); #pragma unroll for (int z = 0; z < 8; ++z) { WA_IMLOAD_2(z); WA_LOOP2(z); } #pragma unroll for (int z = 0; z < 8; ++z) { WA_HALOAD_2(z); WA_LOOP2(z+8); } __syncthreads(); } } } if (scale) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[f][c]; } } } else { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[f][c]; } } } } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters) */ template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale> __launch_bounds__(256, 2) __global__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32_tex(hipTextureObject_t images, hipTextureObject_t hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int numImgColors, const int numGroups, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases __shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X * 
filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; // const int moduleIdx = partialSum * outputModuleIdx; const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks); const int numModules = numModulesY * numModulesX; const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numFilterColors = numImgColors / numGroups; const int blockPixelOffset = blockIdx.z; // pixel idx in filter const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize; const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread; const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors; const int imgOffset = (imgColorIdx + loadY) * imgPixels * imgStride + loadX; const int hidActsOffset = blockFilterIdx * numImages * numModules + loadY * numImages * numModules + loadX; // images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX; // // hidActs += // blockFilterIdx * numImages * numModules // + loadY * numImages * numModules // + loadX; targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors + (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.x; const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride)); const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride)); const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride))); const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride))); const bool doWork = mStartY < mEndY && mStartX < mEndX; float* shHidActLoad = &shHidActs[loadY][loadX]; float* shImgLoad = &shImages[loadY][loadX]; float imPreload[preloadCases*colorsPerThread/B_X]; // [6] float haPreload[preloadCases*filtersPerThread/B_Y]; // [16] float prod[filtersPerThread][colorsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[f][c] = 0; } } int pixIdx, pixIdxNext, m, mNext; conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( mStartY, mStartX, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdx, m); if (doWork) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { imPreload[y * preloadCases/(B_X * B_Y)] = tex1Dfetch<float>(images, imgOffset + y * imgPixels * imgStride + pixIdx); } #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { haPreload[y * preloadCases / (B_X * B_Y)] = tex1Dfetch<float>(hidActs, hidActsOffset + y * numImages * numModules + m * numImages); } } for (int my = mStartY; my < mEndY; my++) { for (int mx = mStartX; mx < mEndX; mx++) { int myNext = my, mxNext = mx; const bool lastModule = my == mEndY - 1 && mx == mEndX - 1; if (!lastModule) { mxNext = mx + 1 == mEndX ? 
mStartX : mx + 1; myNext = my + (mx + 1 == mEndX); } conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( myNext, mxNext, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdxNext, mNext); for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { shImgLoad[(y) * preloadCases] = imPreload[y * preloadCases / (B_X * B_Y)]; } #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { shHidActLoad[y * (preloadCases + 1)] = haPreload[y * preloadCases / (B_X * B_Y)]; } __syncthreads(); // const float* im = &images[caseIdx + preloadCases + pixIdx]; // const float* ha = &hidActs[caseIdx + preloadCases + m * numImages]; int imgOffset2 = imgOffset + caseIdx + preloadCases + pixIdx; int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages; if (caseIdx + preloadCases == numImages) { pixIdx = pixIdxNext; m = mNext; imgOffset2 = imgOffset + pixIdxNext; hidActsOffset2 = hidActsOffset + mNext * numImages; } WA_LOOP(0); WA_LOOP(1); WA_LOOP(2); WA_LOOP(3); WA_LOOP(4); WA_LOOP(5); WA_IMLOAD_TX(0); WA_LOOP(6); WA_IMLOAD_TX(1); WA_LOOP(7); WA_IMLOAD_TX(2); WA_LOOP(8); WA_IMLOAD_TX(3); WA_LOOP(9); WA_IMLOAD_TX(4); WA_LOOP(10); WA_IMLOAD_TX(5); WA_LOOP(11); WA_HALOAD_TX(0); WA_LOOP(12); WA_HALOAD_TX(1); WA_LOOP(13); WA_HALOAD_TX(2); WA_LOOP(14); WA_HALOAD_TX(3); WA_LOOP(15); WA_HALOAD_TX(4); WA_LOOP(16); WA_HALOAD_TX(5); WA_LOOP(17); WA_HALOAD_TX(6); WA_LOOP(18); WA_HALOAD_TX(7); WA_LOOP(19); WA_HALOAD_TX(8); WA_LOOP(20); WA_HALOAD_TX(9); WA_LOOP(21); WA_HALOAD_TX(10); WA_LOOP(22); WA_HALOAD_TX(11); WA_LOOP(23); WA_HALOAD_TX(12); WA_LOOP(24); WA_HALOAD_TX(13); WA_LOOP(25); WA_HALOAD_TX(14); WA_LOOP(26); WA_HALOAD_TX(15); WA_LOOP(27); WA_LOOP(28); WA_LOOP(29); WA_LOOP(30); WA_LOOP(31); __syncthreads(); } } } if (scale) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[f][c]; } } } else { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[f][c]; } } } } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters) */ template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale> __launch_bounds__(256, 2) __global__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32(float* images, float* hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int numImgColors, const int numGroups, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases __shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / 
preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X * filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; // const int moduleIdx = partialSum * outputModuleIdx; const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks); const int numModules = numModulesY * numModulesX; const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numFilterColors = numImgColors / numGroups; const int blockPixelOffset = blockIdx.z; // pixel idx in filter const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize; const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread; const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors; const int imgOffset = (imgColorIdx + loadY) * imgPixels * imgStride + loadX; const int hidActsOffset = blockFilterIdx * numImages * numModules + loadY * numImages * numModules + loadX; // images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX; // // hidActs += // blockFilterIdx * numImages * numModules // + loadY * numImages * numModules // + loadX; targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors + (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.x; const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride)); const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride)); const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride))); const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride))); const bool doWork = mStartY < mEndY && mStartX < mEndX; float* shHidActLoad = &shHidActs[loadY][loadX]; float* shImgLoad = &shImages[loadY][loadX]; float imPreload[preloadCases*colorsPerThread/B_X]; // [6] float haPreload[preloadCases*filtersPerThread/B_Y]; // [16] float prod[filtersPerThread][colorsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[f][c] = 0; } } int pixIdx, pixIdxNext, m, mNext; conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( mStartY, mStartX, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdx, m); if (doWork) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { imPreload[y * preloadCases/(B_X * B_Y)] = images[imgOffset + y * imgPixels * imgStride + pixIdx]; } #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { haPreload[y * preloadCases / (B_X * B_Y)] = hidActs[hidActsOffset + y * numImages * numModules + m * numImages]; } } for (int my = mStartY; my < mEndY; my++) { for (int mx = mStartX; mx < mEndX; mx++) { int myNext = my, mxNext = mx; const 
bool lastModule = my == mEndY - 1 && mx == mEndX - 1; if (!lastModule) { mxNext = mx + 1 == mEndX ? mStartX : mx + 1; myNext = my + (mx + 1 == mEndX); } conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( myNext, mxNext, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdxNext, mNext); for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { shImgLoad[(y) * preloadCases] = imPreload[y * preloadCases / (B_X * B_Y)]; } #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { shHidActLoad[y * (preloadCases + 1)] = haPreload[y * preloadCases / (B_X * B_Y)]; } __syncthreads(); // const float* im = &images[caseIdx + preloadCases + pixIdx]; // const float* ha = &hidActs[caseIdx + preloadCases + m * numImages]; int imgOffset2 = imgOffset + caseIdx + preloadCases + pixIdx; int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages; if (caseIdx + preloadCases == numImages) { pixIdx = pixIdxNext; m = mNext; imgOffset2 = imgOffset + pixIdxNext; hidActsOffset2 = hidActsOffset + mNext * numImages; } WA_LOOP(0); WA_LOOP(1); WA_LOOP(2); WA_LOOP(3); WA_LOOP(4); WA_LOOP(5); WA_IMLOAD_2(0); WA_LOOP(6); WA_IMLOAD_2(1); WA_LOOP(7); WA_IMLOAD_2(2); WA_LOOP(8); WA_IMLOAD_2(3); WA_LOOP(9); WA_IMLOAD_2(4); WA_LOOP(10); WA_IMLOAD_2(5); WA_LOOP(11); WA_HALOAD_2(0); WA_LOOP(12); WA_HALOAD_2(1); WA_LOOP(13); WA_HALOAD_2(2); WA_LOOP(14); WA_HALOAD_2(3); WA_LOOP(15); WA_HALOAD_2(4); WA_LOOP(16); WA_HALOAD_2(5); WA_LOOP(17); WA_HALOAD_2(6); WA_LOOP(18); WA_HALOAD_2(7); WA_LOOP(19); WA_HALOAD_2(8); WA_LOOP(20); WA_HALOAD_2(9); WA_LOOP(21); WA_HALOAD_2(10); WA_LOOP(22); WA_HALOAD_2(11); WA_LOOP(23); WA_HALOAD_2(12); WA_LOOP(24); WA_HALOAD_2(13); WA_LOOP(25); WA_HALOAD_2(14); WA_LOOP(26); WA_HALOAD_2(15); WA_LOOP(27); WA_LOOP(28); WA_LOOP(29); WA_LOOP(30); WA_LOOP(31); __syncthreads(); } } } if (scale) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[f][c]; } } } else { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[f][c]; } } } } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters) */ template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale> __launch_bounds__(256, 2) __global__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_tex(hipTextureObject_t images, hipTextureObject_t hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int numImgColors, const int numGroups, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases __shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload 
preloadCases cases of B_X hidacts const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X * filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; // const int moduleIdx = partialSum * outputModuleIdx; const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks); const int numModules = numModulesY * numModulesX; const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numFilterColors = numImgColors / numGroups; const int blockPixelOffset = blockIdx.z; // pixel idx in filter const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize; const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread; const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors; const int imgOffset = (imgColorIdx + loadY) * imgPixels * imgStride + loadX; // images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX; const int hidActsOffset = blockFilterIdx * numImages * numModules + loadY * numImages * numModules + loadX; // // hidActs += // blockFilterIdx * numImages * numModules // + loadY * numImages * numModules // + loadX; targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors + (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.x; // if (blockIdx.x != 0 || blockIdx.y != 0 || blockIdx.z != 0) return; const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride)); const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride)); const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride))); const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride))); const bool doWork = mStartY < mEndY && mStartX < mEndX; float* shHidActLoad = &shHidActs[loadY][loadX]; float* shImgLoad = &shImages[loadY][loadX]; float imPreload[preloadCases*colorsPerThread/B_X]; // [4] float haPreload[preloadCases*filtersPerThread/B_Y]; // [8] float prod[filtersPerThread][colorsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[f][c] = 0; } } int pixIdx, pixIdxNext, m, mNext; conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( mStartY, mStartX, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdx, m); if (doWork && loadY < B_Y * colorsPerThread) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { imPreload[y * preloadCases/(B_X * B_Y)] = tex1Dfetch<float>(images, imgOffset + y * imgPixels * imgStride + pixIdx); } } if (doWork && loadY < B_X * filtersPerThread) { #pragma unroll for (int y = 0; y < B_X * 
filtersPerThread; y += (B_X * B_Y) / preloadCases) { haPreload[y * preloadCases / (B_X * B_Y)] = tex1Dfetch<float>(hidActs, hidActsOffset + y * numImages * numModules + m * numImages); } } for (int my = mStartY; my < mEndY; my++) { for (int mx = mStartX; mx < mEndX; mx++) { int myNext = my, mxNext = mx; const bool lastModule = my == mEndY - 1 && mx == mEndX - 1; if (!lastModule) { mxNext = mx + 1 == mEndX ? mStartX : mx + 1; myNext = my + (mx + 1 == mEndX); } conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( myNext, mxNext, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdxNext, mNext); for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { // const float* im = &images[caseIdx + preloadCases + pixIdx]; int imgOffset2 = imgOffset + caseIdx + preloadCases + pixIdx; int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages; // const float* ha = &hidActs[caseIdx + preloadCases + m * numImages]; if (caseIdx + preloadCases == numImages) { pixIdx = pixIdxNext; m = mNext; // im = &images[pixIdxNext]; imgOffset2 = imgOffset + pixIdxNext; hidActsOffset2 = hidActsOffset + mNext * numImages; // ha = &hidActs[mNext * numImages]; } if (loadY < B_Y * colorsPerThread) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { shImgLoad[(y) * preloadCases] = imPreload[y * preloadCases / (B_X * B_Y)]; } } if (loadY < B_X * filtersPerThread) { #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { shHidActLoad[y * (preloadCases + 1)] = haPreload[y * preloadCases / (B_X * B_Y)]; } } __syncthreads(); WA_LOOP(0); WA_IMLOAD_TX(0); WA_LOOP(1); WA_IMLOAD_TX(1); WA_LOOP(2); WA_IMLOAD_TX(2); WA_LOOP(3); WA_IMLOAD_TX(3); WA_LOOP(4); WA_HALOAD_TX(0); WA_LOOP(5); WA_HALOAD_TX(1); WA_LOOP(6); WA_HALOAD_TX(2); WA_LOOP(7); WA_HALOAD_TX(3); WA_LOOP(8); WA_HALOAD_TX(4); WA_LOOP(9); WA_HALOAD_TX(5); WA_LOOP(10); WA_HALOAD_TX(6); WA_LOOP(11); WA_HALOAD_TX(7); WA_LOOP(12); WA_LOOP(13); WA_LOOP(14); WA_LOOP(15); __syncthreads(); } } } if (scale) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[f][c]; } } } else { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[f][c]; } } } } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters) */ template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale> __launch_bounds__(256, 2) __global__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16(float* images, float* hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int numImgColors, const int numGroups, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases __shared__ float 
shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X * filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; // const int moduleIdx = partialSum * outputModuleIdx; const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks); const int numModules = numModulesY * numModulesX; const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numFilterColors = numImgColors / numGroups; const int blockPixelOffset = blockIdx.z; // pixel idx in filter const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize; const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread; const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors; const int imgOffset = (imgColorIdx + loadY) * imgPixels * imgStride + loadX; // images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX; const int hidActsOffset = blockFilterIdx * numImages * numModules + loadY * numImages * numModules + loadX; // // hidActs += // blockFilterIdx * numImages * numModules // + loadY * numImages * numModules // + loadX; targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors + (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.x; // if (blockIdx.x != 0 || blockIdx.y != 0 || blockIdx.z != 0) return; const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride)); const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride)); const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride))); const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride))); const bool doWork = mStartY < mEndY && mStartX < mEndX; float* shHidActLoad = &shHidActs[loadY][loadX]; float* shImgLoad = &shImages[loadY][loadX]; float imPreload[preloadCases*colorsPerThread/B_X]; // [4] float haPreload[preloadCases*filtersPerThread/B_Y]; // [8] float prod[filtersPerThread][colorsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[f][c] = 0; } } int pixIdx, pixIdxNext, m, mNext; conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( mStartY, mStartX, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdx, m); if (doWork && loadY < B_Y * colorsPerThread) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { imPreload[y * preloadCases/(B_X * B_Y)] = images[imgOffset + y * imgPixels * imgStride + pixIdx]; } } if (doWork && loadY < B_X * filtersPerThread) { #pragma 
unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { haPreload[y * preloadCases / (B_X * B_Y)] = hidActs[hidActsOffset + y * numImages * numModules + m * numImages]; } } for (int my = mStartY; my < mEndY; my++) { for (int mx = mStartX; mx < mEndX; mx++) { int myNext = my, mxNext = mx; const bool lastModule = my == mEndY - 1 && mx == mEndX - 1; if (!lastModule) { mxNext = mx + 1 == mEndX ? mStartX : mx + 1; myNext = my + (mx + 1 == mEndX); } conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( myNext, mxNext, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdxNext, mNext); for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { // const float* im = &images[caseIdx + preloadCases + pixIdx]; int imgOffset2 = imgOffset + caseIdx + preloadCases + pixIdx; int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages; // const float* ha = &hidActs[caseIdx + preloadCases + m * numImages]; if (caseIdx + preloadCases == numImages) { pixIdx = pixIdxNext; m = mNext; // im = &images[pixIdxNext]; imgOffset2 = imgOffset + pixIdxNext; hidActsOffset2 = hidActsOffset + mNext * numImages; // ha = &hidActs[mNext * numImages]; } if (loadY < B_Y * colorsPerThread) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { shImgLoad[(y) * preloadCases] = imPreload[y * preloadCases / (B_X * B_Y)]; } } if (loadY < B_X * filtersPerThread) { #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { shHidActLoad[y * (preloadCases + 1)] = haPreload[y * preloadCases / (B_X * B_Y)]; } } __syncthreads(); WA_LOOP(0); WA_IMLOAD_2(0); WA_LOOP(1); WA_IMLOAD_2(1); WA_LOOP(2); WA_IMLOAD_2(2); WA_LOOP(3); WA_IMLOAD_2(3); WA_LOOP(4); WA_HALOAD_2(0); WA_LOOP(5); WA_HALOAD_2(1); WA_LOOP(6); WA_HALOAD_2(2); WA_LOOP(7); WA_HALOAD_2(3); WA_LOOP(8); WA_HALOAD_2(4); WA_LOOP(9); WA_HALOAD_2(5); WA_LOOP(10); WA_HALOAD_2(6); WA_LOOP(11); WA_HALOAD_2(7); WA_LOOP(12); WA_LOOP(13); WA_LOOP(14); WA_LOOP(15); __syncthreads(); } } } if (scale) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[f][c]; } } } else { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[f][c]; } } } } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModules, numImages) * * targets: (numModuleY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters) * * TODO: you can get a slight speed boost for local non-convolutional units by writing special * routines for partialSum = 1. But I dunno if the code duplication is worth it... * * Note: all of these convolution routines are optimized for the case when * the number of images (i.e. the minibatch size) is a multiple of 128. * Other batch sizes will work, but but I made no attempt whatsoever * to make them work fast. 
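 *
 * (Added note — a small worked example of the chunking implemented below,
 *  grounded in the DIVUP logic of _weightActs: modules are grouped into
 *  sumWidth x sumWidth chunks, so with numModulesY = numModulesX = 12 and
 *  sumWidth = 4 there are DIVUP(12, 4) * DIVUP(12, 4) = 9 output chunks,
 *  and targets is resized to (9 * numFilterColors * filterPixels, numFilters).)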
*/ void _weightActs(THCudaTensor* images, THCudaTensor* hidActs, THCudaTensor* targets, int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numGroups, int sumWidth, float scaleTargets, float scaleOutput) { int numFilterColors = numImgColors / numGroups; int imgStride = images->stride[0]; int numImages = images->size[1]; int imgPixels = images->size[0] / numImgColors; int imgSizeX = imgPixels / imgSizeY; int numModules = numModulesY * numModulesX; int numFilters = hidActs->size[0] / numModules; int numFiltersPerGroup = numFilters / numGroups; THAssert(numImgColors % numGroups == 0); THAssert(numFilters % (16*numGroups) == 0); THAssert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 16 == 0))); THAssert(numGroups == 1 || numFilterColors % 16 == 0); THAssert(imgSizeY * imgSizeX == imgPixels); THAssert(images->size[0] == imgPixels * numImgColors); int filterPixels = filterSize * filterSize; int outputModuleChunksX = DIVUP(numModulesX, sumWidth); int outputModuleChunksY = DIVUP(numModulesY, sumWidth); int outputModuleChunks = outputModuleChunksX * outputModuleChunksY; // partialSum = partialSum == 0 ? numModules : partialSum; // THAssert(numModules % partialSum == 0); THAssert(hidActs->size[1] == numImages); // These routines don't handle the case when only part of the image is visited in the convolution THAssert(paddingStart <= 0); THAssert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX); THAssert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY); THAssert(moduleStride <= filterSize); THAssert(numModules * numFilters == hidActs->size[0]); THAssert(THCudaTensor_isContiguous(hidActs)); THAssert(THCudaTensor_isContiguous(targets)); int preloadCases = 32; dim3 blocks, threads; int bx, by; int pixelsPerThread, filtersPerThread, colorsPerThread; // Worth playing with these parameters to find best values for your problem. // These values work relatively well, but not optimal for all problems. if (numFilterColors > 3) { filtersPerThread = numFiltersPerGroup % 64 == 0 ? 4 : numFiltersPerGroup % 32 == 0 ? 2 : 1; colorsPerThread = numFilterColors % 64 == 0 ? 8 : numFilterColors % 48 == 0 ? 6 : numFilterColors % 32 == 0 ? 8 : 4; by = (numFilterColors / colorsPerThread) % 8 == 0 ? 8 : 4; bx = numFiltersPerGroup % 128 == 0 ? 32 : 16; preloadCases = filtersPerThread * colorsPerThread < 32 ? 32 : 16; blocks = dim3(outputModuleChunks*(numFilters/(bx*filtersPerThread)), numFilterColors / (by*colorsPerThread), filterPixels); THAssert(numFilterColors % (by*colorsPerThread) == 0); } else { // This is ugly but it's nice to spell it out clearly THAssert(numGroups == 1); // Just for sanity // NOTE: these things are only optimized for colors = 3. I didn't really test other cases. if (numFilters % 64 == 0) { // TODO: having a separate case for 128 would make things faster, but I probably don't care about 128 filtersPerThread = 4; pixelsPerThread = 2; by = 16; bx = 16; preloadCases = 32; } else if (numFilters % 48 == 0) { filtersPerThread = 3; pixelsPerThread = 4; by = 16; bx = 16; preloadCases = 32; } else if (numFilters % 32 == 0) { filtersPerThread = 2; pixelsPerThread = 2; by = 8; bx = 16; preloadCases = 16; } else { // This case is completely untested. It might be really slow. But no time now. 
filtersPerThread = 1; pixelsPerThread = 16; by = 16; bx = 16; preloadCases = 32; } blocks = dim3(outputModuleChunks*(numFilters/(bx*filtersPerThread)), DIVUP(filterPixels, by*pixelsPerThread)); } THAssert((by * bx) % preloadCases == 0); THAssert(numFilters % (bx * filtersPerThread) == 0); threads = dim3(bx, by); bool checkCaseBounds = numImages % preloadCases != 0; bool scale = scaleTargets != 0; if (!scale) { THCudaTensor_resize2d(targets, outputModuleChunks * numFilterColors*filterPixels, numFilters); } else { THAssert(targets->size[0] == outputModuleChunks * numFilterColors*filterPixels); THAssert(targets->size[1] == numFilters); } if (scale == false) { if (checkCaseBounds == false) { if (numFilterColors > 3) { if (numFilterColors % 64 == 0) { if (numFiltersPerGroup % 128 == 0) { if ((THCudaTensor_nElement(images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(hidActs)*4 < TEXTURE_SIZE_MAX)) { hipTextureObject_t texImages = THCudaTensor_getTextureObject(images); hipTextureObject_t texHidActs = THCudaTensor_getTextureObject(hidActs); hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_tex< 8, 32, 4, 8, 16, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_tex< 8, 32, 4, 8, 16, false >), dim3(blocks), dim3(threads), 0, 0, texImages, texHidActs, THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); checkCudaErrors(hipDestroyTextureObject(texImages)); checkCudaErrors(hipDestroyTextureObject(texHidActs)); } else { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16< 8, 32, 4, 8, 16, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16< 8, 32, 4, 8, 16, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFiltersPerGroup % 64 == 0) { if ((THCudaTensor_nElement(images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(hidActs)*4 < TEXTURE_SIZE_MAX)) { hipTextureObject_t texImages = THCudaTensor_getTextureObject(images); hipTextureObject_t texHidActs = THCudaTensor_getTextureObject(hidActs); hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16_tex< 8, 16, 4, 8, 16, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16_tex< 8, 16, 4, 8, 16, false >), dim3(blocks), dim3(threads), 0, 0, texImages, texHidActs, THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); checkCudaErrors(hipDestroyTextureObject(texImages)); checkCudaErrors(hipDestroyTextureObject(texHidActs)); } else { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16< 8, 16, 4, 8, 16, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16< 8, 16, 4, 8, 16, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), 
THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numFiltersPerGroup % 128 == 0) { if ((THCudaTensor_nElement(images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(hidActs)*4 < TEXTURE_SIZE_MAX)) { hipTextureObject_t texImages = THCudaTensor_getTextureObject(images); hipTextureObject_t texHidActs = THCudaTensor_getTextureObject(hidActs); hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32_tex< 8, 32, 4, 6, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32_tex< 8, 32, 4, 6, 32, false >), dim3(blocks), dim3(threads), 0, 0, texImages, texHidActs, THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); checkCudaErrors(hipDestroyTextureObject(texImages)); checkCudaErrors(hipDestroyTextureObject(texHidActs)); } else { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32< 8, 32, 4, 6, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32< 8, 32, 4, 6, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( 
conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, 
paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numFiltersPerGroup % 64 == 0) { if ((THCudaTensor_nElement(images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(hidActs)*4 < TEXTURE_SIZE_MAX)) { hipTextureObject_t texImages = THCudaTensor_getTextureObject(images); hipTextureObject_t texHidActs = THCudaTensor_getTextureObject(hidActs); hipFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3_tex < 16, 16, 2, 2, 4, 32, 3, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3_tex < 16, 16, 2, 2, 4, 32, 3, false, false >), dim3(blocks), dim3(threads), 0, 0, texImages, texHidActs, THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); checkCudaErrors(hipDestroyTextureObject(texImages)); checkCudaErrors(hipDestroyTextureObject(texHidActs)); } else { hipFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3 < 16, 16, 2, 2, 4, 32, 3, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3 < 16, 16, 2, 2, 4, 32, 3, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFiltersPerGroup % 48 == 0) { if ((THCudaTensor_nElement(images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(hidActs)*4 < TEXTURE_SIZE_MAX)) { hipTextureObject_t texImages = THCudaTensor_getTextureObject(images); hipTextureObject_t texHidActs = THCudaTensor_getTextureObject(hidActs); 
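// Both tensors occupy fewer than TEXTURE_SIZE_MAX bytes (nElement * 4), so the
// texture-object variant of the kernel is launched for cached reads through
// texImages/texHidActs; the else branch below falls back to the plain
// global-memory kernel with the same template parameters.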
hipFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3_tex < 16, 16, 2, 4, 3, 32, 3, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3_tex < 16, 16, 2, 4, 3, 32, 3, false, false >), dim3(blocks), dim3(threads), 0, 0, texImages, texHidActs, THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); checkCudaErrors(hipDestroyTextureObject(texImages)); checkCudaErrors(hipDestroyTextureObject(texHidActs)); } else { hipFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3 < 16, 16, 2, 4, 3, 32, 3, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3 < 16, 16, 2, 4, 3, 32, 3, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, 
false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } } } else if (checkCaseBounds == true) { if (numFilterColors > 3) { if (numFilterColors % 64 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 8, 16, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 8, 16, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, 
imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 8, 16, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 8, 16, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 6, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 6, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 
32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, 
numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 3, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 3, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 3, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 3, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( 
conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); 
} else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } } } } else if (scale == true) { if (checkCaseBounds == false) { if (numFilterColors > 3) { if (numFilterColors % 64 == 0) { if (numFiltersPerGroup % 128 == 0) { if ((THCudaTensor_nElement(images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(hidActs)*4 < TEXTURE_SIZE_MAX)) { hipTextureObject_t texImages = THCudaTensor_getTextureObject(images); hipTextureObject_t texHidActs = THCudaTensor_getTextureObject(hidActs); hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_tex< 8, 32, 4, 8, 16, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_tex< 8, 32, 4, 8, 16, true >), dim3(blocks), dim3(threads), 0, 0, texImages, texHidActs, THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); checkCudaErrors(hipDestroyTextureObject(texImages)); checkCudaErrors(hipDestroyTextureObject(texHidActs)); } else { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16< 8, 32, 4, 8, 16, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16< 8, 32, 4, 8, 16, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFiltersPerGroup % 64 == 0) { if ((THCudaTensor_nElement(images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(hidActs)*4 < TEXTURE_SIZE_MAX)) { hipTextureObject_t texImages = THCudaTensor_getTextureObject(images); hipTextureObject_t texHidActs = THCudaTensor_getTextureObject(hidActs); hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16_tex< 8, 16, 4, 8, 16, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16_tex< 8, 16, 4, 8, 16, true >), dim3(blocks), dim3(threads), 0, 0, texImages, texHidActs, THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); checkCudaErrors(hipDestroyTextureObject(texImages)); checkCudaErrors(hipDestroyTextureObject(texHidActs)); } else { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16< 8, 16, 4, 8, 16, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16< 8, 16, 4, 8, 16, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, 
moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numFiltersPerGroup % 128 == 0) { if ((THCudaTensor_nElement(images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(hidActs)*4 < TEXTURE_SIZE_MAX)) { hipTextureObject_t texImages = THCudaTensor_getTextureObject(images); hipTextureObject_t texHidActs = THCudaTensor_getTextureObject(hidActs); hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32_tex< 8, 32, 4, 6, 32, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32_tex< 8, 32, 4, 6, 32, true >), dim3(blocks), dim3(threads), 0, 0, texImages, texHidActs, THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); checkCudaErrors(hipDestroyTextureObject(texImages)); checkCudaErrors(hipDestroyTextureObject(texHidActs)); } else { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32< 8, 32, 4, 6, 32, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32< 8, 32, 4, 6, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), 
THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { 
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numFiltersPerGroup % 64 == 0) { if ((THCudaTensor_nElement(images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(hidActs)*4 < TEXTURE_SIZE_MAX)) { hipTextureObject_t texImages = THCudaTensor_getTextureObject(images); hipTextureObject_t texHidActs = THCudaTensor_getTextureObject(hidActs); hipFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3_tex < 16, 16, 2, 2, 4, 32, 3, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3_tex < 16, 16, 2, 2, 4, 32, 3, true, false >), dim3(blocks), dim3(threads), 0, 0, texImages, texHidActs, THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); checkCudaErrors(hipDestroyTextureObject(texImages)); checkCudaErrors(hipDestroyTextureObject(texHidActs)); } else { hipFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3 < 16, 16, 2, 2, 4, 32, 3, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3 < 16, 16, 2, 2, 4, 32, 3, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFiltersPerGroup % 48 == 0) { if ((THCudaTensor_nElement(images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(hidActs)*4 < TEXTURE_SIZE_MAX)) { hipTextureObject_t texImages = THCudaTensor_getTextureObject(images); hipTextureObject_t texHidActs = THCudaTensor_getTextureObject(hidActs); hipFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3_tex < 16, 16, 2, 4, 3, 32, 3, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( 
conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3_tex < 16, 16, 2, 4, 3, 32, 3, true, false >), dim3(blocks), dim3(threads), 0, 0, texImages, texHidActs, THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); checkCudaErrors(hipDestroyTextureObject(texImages)); checkCudaErrors(hipDestroyTextureObject(texHidActs)); } else { hipFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3 < 16, 16, 2, 4, 3, 32, 3, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3 < 16, 16, 2, 4, 3, 32, 3, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, 
numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } } } else if (checkCaseBounds == true) { if (numFilterColors > 3) { if (numFilterColors % 64 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 8, 16, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 8, 16, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 8, 16, 
true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 8, 16, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 6, 32, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 6, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, 
filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, true >, hipFuncCachePreferShared); 
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 3, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 3, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 3, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 3, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, 
imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, true, true >), 
                        dim3(blocks), dim3(threads), 0, 0,
                        THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets),
                        numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX,
                        filterSize, paddingStart, moduleStride, imgStride,
                        sumWidth, scaleTargets, scaleOutput);
                    }
                }
            }
        }
    }
    getLastCudaError("weightActs: kernel execution failed");
}

void convWeightActs(THCudaTensor* images, THCudaTensor* hidActs, THCudaTensor* targets,
                    int imgSizeY, int numModulesY, int numModulesX, int filterSize,
                    int paddingStart, int moduleStride, int numImgColors, int numGroups,
                    int partialSum) {
    _weightActs(images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize,
                paddingStart, moduleStride, numImgColors, numGroups, partialSum, 0, 1);
}

void convWeightActsSt(THCudaTensor* images, THCudaTensor* hidActs, THCudaTensor* targets,
                      int imgSizeY, int numModulesY, int numModulesX, int filterSize,
                      int paddingStart, int moduleStride, int numImgColors, int numGroups,
                      int partialSum, float scaleTargets, float scaleOutput) {
    _weightActs(images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize,
                paddingStart, moduleStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}

void localWeightActs(THCudaTensor* images, THCudaTensor* hidActs, THCudaTensor* targets,
                     int imgSizeY, int numModulesY, int numModulesX, int filterSize,
                     int paddingStart, int moduleStride, int numImgColors, int numGroups) {
    _weightActs(images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize,
                paddingStart, moduleStride, numImgColors, numGroups, 1, 0, 1);
}

void localWeightActsSt(THCudaTensor* images, THCudaTensor* hidActs, THCudaTensor* targets,
                       int imgSizeY, int numModulesY, int numModulesX, int filterSize,
                       int paddingStart, int moduleStride, int numImgColors, int numGroups,
                       float scaleTargets, float scaleOutput) {
    _weightActs(images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize,
                paddingStart, moduleStride, numImgColors, numGroups, 1, scaleTargets, scaleOutput);
}
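/*
 * Illustrative usage sketch (not part of the generated file above). The four wrappers differ only
 * in the (partialSum, scaleTargets, scaleOutput) values they forward to _weightActs; the scaled
 * kernels compute targets = scaleTargets * targets + scaleOutput * newGradients. The helper below,
 * its name, and the idea of reusing one gradient buffer are assumptions for illustration only;
 * real callers allocate and size the THCudaTensor arguments through the Torch/cutorch API first.
 */
static void exampleConvWeightActs(THCudaTensor* images, THCudaTensor* hidActs, THCudaTensor* weightGrads,
                                  int imgSizeY, int numModulesY, int numModulesX, int filterSize,
                                  int paddingStart, int moduleStride, int numImgColors, int numGroups,
                                  int partialSum) {
    // Overwrite weightGrads with freshly computed weight gradients
    // (convWeightActs forwards scaleTargets = 0, scaleOutput = 1).
    convWeightActs(images, hidActs, weightGrads, imgSizeY, numModulesY, numModulesX,
                   filterSize, paddingStart, moduleStride, numImgColors, numGroups, partialSum);
    // Accumulate a second contribution on top of the existing buffer contents instead,
    // keeping the previous gradients (scaleTargets = 1) and adding the new ones (scaleOutput = 1).
    convWeightActsSt(images, hidActs, weightGrads, imgSizeY, numModulesY, numModulesX,
                     filterSize, paddingStart, moduleStride, numImgColors, numGroups, partialSum,
                     1.0f, 1.0f);
}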
d7ca3cc863e5f14e348019e82ff8bda7da015dd1.cu
/* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../include/cudaconv2.cuh" #include "../../nvmatrix/include/nvmatrix_kernels.cuh" #define LO16(x) ((x) & 0x0000FFFF) #define HI16(x) ((x) >> 16) #define WA_LOOP(r) _Pragma("unroll") \ for (int c = 0; c < colorsPerThread; c++) { \ _Pragma("unroll") \ for (int f = 0; f < filtersPerThread; f++) { \ prod[f][c] += shImages[threadIdx.y + c * B_Y][(r)] * shHidActs[threadIdx.x + f * B_X][(r)]; \ } \ } #define WA_LOOP2(r) _Pragma("unroll") \ for (int f = 0; f < filtersPerThread; f++) { \ _Pragma("unroll") \ for (int c = 0; c < colorsPerThread; c++) { \ prod[f][c] += shImages[threadIdx.y + c * B_Y][(r)] * shHidActs[threadIdx.x + f * B_X][(r)]; \ } \ } #define WA_IMLOAD(r) imPreload[r] = im[(r) * B_X * B_Y / preloadCases * imgPixels * imgStride]; #define WA_IMLOAD_2(r) imPreload[r] = images[imgOffset2 + (r) * B_X * B_Y / preloadCases * imgPixels * imgStride]; #define WA_IMLOAD_TX(r) imPreload[r] = tex1Dfetch<float>(images, imgOffset2 + (r) * B_X * B_Y / preloadCases * imgPixels * imgStride); #define WA_HALOAD(r) haPreload[r] = ha[(r) * B_X * B_Y / preloadCases * numImages * numModules]; #define WA_HALOAD_2(r) haPreload[r] = hidActs[hidActsOffset2 + (r) * B_X * B_Y / preloadCases * numImages * numModules]; #define WA_HALOAD_TX(r) haPreload[r] = tex1Dfetch<float>(hidActs, hidActsOffset2 + (r) * B_X * B_Y / preloadCases * numImages * numModules); __device__ __forceinline__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( const int my, const int mx, const int paddingStart, const int numModulesX, const int moduleStride, const int blockPixelY, const int blockPixelX, const int imgSizeX, const int imgStride, int& pixIdx, int& m) { const int imgLoadModPosY = paddingStart + my * moduleStride; const int imgLoadModPosX = paddingStart + mx * moduleStride; const int pxY = imgLoadModPosY + blockPixelY; // pixel x,y coords in image const int pxX = imgLoadModPosX + blockPixelX; pixIdx = (pxY * imgSizeX + pxX) * imgStride; // pixel idx in image m = my * numModulesX + mx; } /* * Each block computes weight gradients for B_Y * pixelsPerThread pixels and B_X filters * threadIdx.x determines filter * threadIdx.y determines pixel in filter * * blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum * blockIdx.y determines pixel batch of B_Y * pixelsPerThread * * Number of filters must be divisible by B_X * filtersPerThread * Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false. * * images: (numColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numColors, filterPixels, numFilters) * * B_Y * B_X should be divisible by preloadCases. * preloadCases one of 16, 32. 
* B_X one of 4, 8, 16, 32 * B_Y arbitrary (satisfying divisibility constraints) * numModules must be divisible by partialSum * pixelsPerThread must be divisible by pixelCache * * After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)... * so the compiler is messing up here somehow. It's unable to optimize that case away. */ template <int B_Y, int B_X, int pixelCache, int pixelsPerThread, int filtersPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds> __global__ void conv_weight_acts_c_kepler(float* images, float* hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int partialSum, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[pixelCache * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels __shared__ float shHidActs[B_X * filtersPerThread][preloadCases + 1]; // preload preloadCases cases of B_X hidActs const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int filterBlocksPerModule = numFilters / (B_X*filtersPerThread); const int outputModuleIdx = blockIdx.x / filterBlocksPerModule; const int moduleIdx = partialSum * outputModuleIdx; const int blockFilterIdx = B_X * filtersPerThread* (blockIdx.x % filterBlocksPerModule); // const int moduleStride = (imgSize - filterSize + 1) / numModulesX; const int numModules = numModulesY * numModulesX; const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread; images += loadX; hidActs += blockFilterIdx * numImages * numModules + loadY * numImages * numModules + loadX; targets += (outputModuleIdx * numFilters) * filterPixels * numColors + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.y * numFilters + threadIdx.x; float prod[numColors][pixelsPerThread][filtersPerThread]; #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[c][p][f] = 0; } } } __shared__ int pxIdxes[B_Y*pixelsPerThread]; //__shared__ bool isPxInImage[B_Y*pixelsPerThread]; for (int m = moduleIdx; m < moduleIdx + partialSum; m++) { __syncthreads(); if (tidx < B_Y * pixelsPerThread) { const int imgLoadModPosY = paddingStart + (m / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride; int pxY = (imgLoadModPosY + (blockPixelOffset + tidx) / filterSize); int pxX = (imgLoadModPosX + (blockPixelOffset + tidx) % filterSize); int pixIdx = (pxY * imgSizeX + pxX) * imgStride; pxIdxes[tidx] = pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX ? 
pixIdx : -1; //isPxInImage[tidx] = ; } __syncthreads(); for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { if (/*loadY < B_X*filtersPerThread &&*/ (!checkCaseBounds || caseIdx + loadX < numImages)) { #pragma unroll for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_X*filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X*filtersPerThread) { shHidActs[loadY+y][loadX]= hidActs[caseIdx + y * numImages * numModules + m * numImages]; } } } #pragma unroll for (int pp = 0; pp < pixelsPerThread; pp += pixelCache) { //if (loadY < B_Y * pixelCache) { // This condition is not necessary for correctness, but it speeds things a bit /* * As long as B_Y * B_X is divisible by preloadCases this will loop the right * number of times. * * This will load some imgGrads from filter pixels that don't exit (it'll set those to 0), * but the code does not produce any output for those pixels (see last lines). */ #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) { const int pxIdx = pp * B_Y + loadY + y; // pixel idx in filter if (pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) { const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride; if (pixIdx >= 0) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = images[caseIdx + c * imgPixels * imgStride + pixIdx]; } } else { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0; } } } else { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX]= 0; } } } } //} __syncthreads(); #pragma unroll for (int i = 0; i < preloadCases; i++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int p = 0; p < pixelCache; p++) { #pragma unroll for (int c = 0; c < numColors; c++) { prod[c][pp + p][f] += shImages[threadIdx.y + p * B_Y + c * pixelCache * B_Y][i] * shHidActs[threadIdx.x + f * B_X][i]; } } } } __syncthreads(); } } } if (scale) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][p][f]; } } } } } else { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][p][f]; } } } } } } /* * Each block computes weight gradients for 1 pixel, B_Y * colorsPerThread colors and B_X * filtersPerThread filters * threadIdx.x determines filter * threadIdx.y determines color * * blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum * blockIdx.y determines color batch of B_Y * colorsPerThread * blockIdx.z determines pixel 
in filter * NOTE: blockIdx.z is limited to values < 2^16. This means that this routine will * fail for filters >= 256*256. I'm assuming I won't ever use such large filters. * images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters) * B_X * B_Y must be divisible by preloadCases */ template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale> __global__ void conv_weight_acts_mc_mf_kepler(float* images, float* hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int numImgColors, const int numGroups, const int partialSum, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases __shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X * filtersPerThread); const int outputModuleIdx = blockIdx.x / numFilterBlocks; const int moduleIdx = partialSum * outputModuleIdx; const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks); const int numModules = numModulesY * numModulesX; const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numFilterColors = numImgColors / numGroups; const int blockPixelOffset = blockIdx.z; // pixel idx in filter const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize; const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread; const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors; images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX; hidActs += blockFilterIdx * numImages * numModules + loadY * numImages * numModules + loadX; targets += outputModuleIdx * numFilters * filterPixels * numFilterColors + (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.x; //if (blockIdx.x != 0 || blockIdx.y != 0 || blockIdx.z != 0) return; float* shHidActLoad = &shHidActs[loadY][loadX]; float* shImgLoad = &shImages[loadY][loadX]; float prod[colorsPerThread][filtersPerThread]; #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[c][f] = 0; } } for (int m = moduleIdx; m < moduleIdx + partialSum; m++) { const int imgLoadModPosY = paddingStart + (m / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride; const int pxY = imgLoadModPosY + blockPixelY; // pixel x,y coords in image const int pxX = imgLoadModPosX + blockPixelX; const int pixIdx = (pxY * imgSizeX + pxX) * imgStride; // pixel idx in image if (pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX) { for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { // Checking this condition actually makes things faster ... 
:/ // So I've removed the !checkCaseBounds flag and just check it all the time. if (caseIdx + loadX < numImages) { /* * As long as B_Y * B_X is divisible by preloadCases this will loop the right * number of times. * * This will load some images from filter pixels that don't exist (it'll set those to 0), * but the code does not produce any output for those pixels (see last lines). */ if (loadY < B_Y * colorsPerThread) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y*colorsPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y*colorsPerThread) { shImgLoad[(y) * preloadCases] = images[caseIdx + y * imgPixels * imgStride + pixIdx]; } } } if (loadY < B_X * filtersPerThread) { #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_X * filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X * filtersPerThread) { shHidActLoad[y * (preloadCases + 1)] = hidActs[caseIdx + y * numImages * numModules + m * numImages]; } } } } else { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y*colorsPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y*colorsPerThread) { shImgLoad[(y) * preloadCases] = 0; } } #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_X * filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X * filtersPerThread) { shHidActLoad[y * (preloadCases + 1)] = 0; } } } __syncthreads(); #pragma unroll for (int i = 0; i < preloadCases; i++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[c][f] += shImages[threadIdx.y + c * B_Y][i] * shHidActs[threadIdx.x + f * B_X][i]; } } } __syncthreads(); } } } if (scale) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][f]; } } } else { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][f]; } } } } /* * Each block computes weight gradients for 1 pixel, B_Y * colorsPerThread colors and B_X * filtersPerThread filters * threadIdx.x determines filter * threadIdx.y determines color * * blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum * blockIdx.y determines color batch of B_Y * colorsPerThread * blockIdx.z determines pixel in filter * NOTE: blockIdx.z is limited to values < 2^16. This means that this routine will * fail for filters >= 256*256. I'm assuming I won't ever use such large filters. 
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters) * B_X * B_Y must be divisible by preloadCases */ template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale> __global__ void conv_weight_acts_mc_mf_kepler_sw(float* images, float* hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int numImgColors, const int numGroups, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases __shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X * filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks); const int numModules = numModulesY * numModulesX; const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numFilterColors = numImgColors / numGroups; const int blockPixelOffset = blockIdx.z; // pixel idx in filter const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize; const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread; const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors; images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX; hidActs += blockFilterIdx * numImages * numModules + loadY * numImages * numModules + loadX; targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors + (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.x; //if (blockIdx.x != 0 || blockIdx.y != 0 || blockIdx.z != 0) return; const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride)); const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride)); const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride))); const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride))); // if (mStartY == mEndY || mStartX == mEndX) { // return; // } float* shHidActLoad = &shHidActs[loadY][loadX]; float* shImgLoad = &shImages[loadY][loadX]; float prod[colorsPerThread][filtersPerThread]; #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < 
filtersPerThread; f++) { prod[c][f] = 0; } } /* * Note; iterating this way is about 1% slower and uses a few more registers than iterating * over the modules linearly. But it's consistent with the preload routines, * so I'm using it. */ for (int my = mStartY; my < mEndY; my++) { const int imgLoadModPosY = paddingStart + my * moduleStride; const int pxY = imgLoadModPosY + blockPixelY; // pixel x,y coords in image for (int mx = mStartX; mx < mEndX; mx++) { const int m = my * numModulesX + mx; const int imgLoadModPosX = paddingStart + mx * moduleStride; const int pxX = imgLoadModPosX + blockPixelX; const int pixIdx = (pxY * imgSizeX + pxX) * imgStride; // pixel idx in image for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { // Checking this condition actually makes things faster ... :/ // So I've removed the !checkCaseBounds flag and just check it all the time. if (caseIdx + loadX < numImages) { /* * As long as B_Y * B_X is divisible by preloadCases this will loop the right * number of times. * * This will load some images from filter pixels that don't exist (it'll set those to 0), * but the code does not produce any output for those pixels (see last lines). */ if (loadY < B_Y * colorsPerThread) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y*colorsPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y*colorsPerThread) { shImgLoad[(y) * preloadCases] = images[caseIdx + y * imgPixels * imgStride + pixIdx]; } } } if (loadY < B_X * filtersPerThread) { #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_X * filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X * filtersPerThread) { shHidActLoad[y * (preloadCases + 1)] = hidActs[caseIdx + y * numImages * numModules + m * numImages]; } } } } else { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y*colorsPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y*colorsPerThread) { shImgLoad[(y) * preloadCases] = 0; } } #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_X * filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X * filtersPerThread) { shHidActLoad[y * (preloadCases + 1)] = 0; } } } __syncthreads(); #pragma unroll for (int i = 0; i < preloadCases; i++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[c][f] += shImages[threadIdx.y + c * B_Y][i] * shHidActs[threadIdx.x + f * B_X][i]; } } } __syncthreads(); } } } if (scale) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][f]; } } } else { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][f]; } } } } /* * Each block 
computes weight gradients for B_Y * pixelsPerThread pixels and B_X filters * threadIdx.x determines filter * threadIdx.y determines pixel in filter * * blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum * blockIdx.y determines pixel batch of B_Y * pixelsPerThread * * Number of filters must be divisible by B_X * filtersPerThread * Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false. * * images: (numColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numColors, filterPixels, numFilters) * * B_Y * B_X should be divisible by preloadCases. * preloadCases one of 16, 32. * B_X one of 4, 8, 16, 32 * B_Y arbitrary (satisfying divisibility constraints) * numModules must be divisible by partialSum * pixelsPerThread must be divisible by pixelCache * * After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)... * so the compiler is messing up here somehow. It's unable to optimize that case away. */ template <int B_Y, int B_X, int pixelCache, int pixelsPerThread, int filtersPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds> __global__ void conv_weight_acts_c_kepler_sw(float* images, float* hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[pixelCache * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels __shared__ float shHidActs[B_X * filtersPerThread][preloadCases + 1]; // preload preloadCases cases of B_X hidActs const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X*filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; const int blockFilterIdx = B_X * filtersPerThread* (blockIdx.x % numFilterBlocks); // const int moduleStride = (imgSize - filterSize + 1) / numModulesX; const int numModules = numModulesY * numModulesX; const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread; images += loadX; hidActs += blockFilterIdx * numImages * numModules // + loadY * numImages * numModules + loadX; targets += (blockModuleChunkIdx * numFilters) * filterPixels * numColors + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.y * numFilters + threadIdx.x; //float* shImgLoad = &shImages[loadY][loadX]; //float* shHidActLoad = &shHidActs[loadY][loadX]; float prod[numColors][pixelsPerThread][filtersPerThread]; #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[c][p][f] = 0; } } } const int mStartX = 
blockModuleStartX; const int mStartY = blockModuleStartY; const int mEndX = min(numModulesX, blockModuleStartX + sumWidth); const int mEndY = min(numModulesY, blockModuleStartY + sumWidth); // if (mStartY == mEndY || mStartX == mEndX) { // return; // } const int fYOff = (blockPixelOffset + tidx) / filterSize; const int fXOff = (blockPixelOffset + tidx) % filterSize; __shared__ int pxIdxes[B_Y*pixelsPerThread]; for (int my = mStartY; my < mEndY; my++) { const int imgLoadModPosY = paddingStart + my * moduleStride; for (int mx = mStartX; mx < mEndX; mx++) { const int m = my * numModulesX + mx; __syncthreads(); const int imgLoadModPosX = paddingStart + mx * moduleStride; if (tidx < B_Y * pixelsPerThread) { // const int imgLoadModPosY = paddingStart + my * moduleStride; // const int imgLoadModPosX = paddingStart + mx * moduleStride; int pxY = (imgLoadModPosY + fYOff); int pxX = (imgLoadModPosX + fXOff); int pixIdx = (pxY * imgSizeX + pxX) * imgStride; pxIdxes[tidx] = pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX ? pixIdx : -1; } __syncthreads(); for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { if (/*loadY < B_X*filtersPerThread &&*/ (!checkCaseBounds || caseIdx + loadX < numImages)) { #pragma unroll for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) { const int fIdx = ((loadY + y) % filtersPerThread) * B_X + (loadY + y) / filtersPerThread; // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_X*filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || loadY+y < B_X*filtersPerThread) { shHidActs[loadY+y][loadX]= hidActs[caseIdx + fIdx * numImages * numModules + m * numImages]; } } } else { #pragma unroll for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) { // const int fIdx = ((loadY + y) % filtersPerThread) * B_X + (loadY + y) / filtersPerThread; // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_X*filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || loadY+y < B_X*filtersPerThread) { shHidActs[loadY+y][loadX] = 0; } } } #pragma unroll for (int pp = 0; pp < pixelsPerThread; pp += pixelCache) { //if (loadY < B_Y * pixelCache) { // This condition is not necessary for correctness, but it speeds things a bit /* * As long as B_Y * B_X is divisible by preloadCases this will loop the right * number of times. * * This will load some imgGrads from filter pixels that don't exit (it'll set those to 0), * but the code does not produce any output for those pixels (see last lines). 
*/ #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) { const int pxIdx = pp * B_Y + loadY + y; // pixel idx in filter if (pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) { const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride; if (pixIdx >= 0) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = images[caseIdx + c * imgPixels * imgStride + pixIdx]; } } else { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0; } } } else { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX]= 0; } } } } //} __syncthreads(); #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int i = 0; i < preloadCases; i++) { #pragma unroll for (int p = 0; p < pixelCache; p++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[c][pp + p][f] += shImages[threadIdx.y + p * B_Y + c * pixelCache * B_Y][i] * shHidActs[threadIdx.x * filtersPerThread + f][i]; } } } } __syncthreads(); } } } } if (scale) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][p][f]; } } } } } else { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][p][f]; } } } } } } #define WA_C3_LOOP(pp, c) _Pragma("unroll") \ for (int i = 0; i < preloadCases; i++) { \ _Pragma("unroll") \ for (int p = 0; p < pixelCache; p++) { \ _Pragma("unroll") \ for (int f = 0; f < filtersPerThread; f++) { \ prod[c][(pp) + p][f] += shImages[threadIdx.y + p * B_Y + (c) * pixelCache * B_Y][i] * shHidActs[threadIdx.x * filtersPerThread + f][i]; \ } \ } \ } #define WA_C3_LOOP2(pp) _Pragma("unroll") \ for (int p = 0; p < pixelCache; p++) { \ _Pragma("unroll") \ for (int i = 0; i < preloadCases; i++) { \ _Pragma("unroll") \ for (int f = 0; f < filtersPerThread; f++) { \ _Pragma("unroll") \ for (int c = 0; c < 3; ++c) { \ prod[c][(pp) + p][f] += shImages[threadIdx.y + p * B_Y + (c) * pixelCache * B_Y][i] * shHidActs[threadIdx.x * filtersPerThread + f][i]; \ } \ } \ } \ } #define WA_3_FIDX(y) (((loadY + (y)*B_X*B_Y/preloadCases) % filtersPerThread) * B_X + (loadY + (y)*B_X*B_Y/preloadCases) / filtersPerThread) /* * Each block computes weight gradients for B_Y * pixelsPerThread pixels and B_X filters * threadIdx.x determines filter * threadIdx.y determines pixel in filter * * blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum * blockIdx.y determines pixel batch of B_Y * pixelsPerThread * * Number of filters must be divisible by B_X * filtersPerThread * Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false. 
* * images: (numColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numColors, filterPixels, numFilters) * * B_Y * B_X should be divisible by preloadCases. * preloadCases one of 16, 32. * B_X one of 4, 8, 16, 32 * B_Y arbitrary (satisfying divisibility constraints) * numModules must be divisible by partialSum * pixelsPerThread must be divisible by pixelCache * * After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)... * so the compiler is messing up here somehow. It's unable to optimize that case away. */ template <int B_Y, int B_X, int pixelCache, int pixelsPerThread, int filtersPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds> //__launch_bounds__(256,2) __global__ void conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3_tex(cudaTextureObject_t images, cudaTextureObject_t hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[pixelCache * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels __shared__ float shHidActs[B_X * filtersPerThread][preloadCases + 1]; // preload preloadCases cases of B_X hidActs const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X*filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; const int blockFilterIdx = B_X * filtersPerThread* (blockIdx.x % numFilterBlocks); // const int moduleStride = (imgSize - filterSize + 1) / numModulesX; const int numModules = numModulesY * numModulesX; const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread; const int imgOffset = loadX; const int hidActsOffset = blockFilterIdx * numImages * numModules + loadX; // images += loadX; // hidActs += blockFilterIdx * numImages * numModules // + loadX; targets += (blockModuleChunkIdx * numFilters) * filterPixels * numColors + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.y * numFilters + threadIdx.x; //float* shImgLoad = &shImages[loadY][loadX]; //float* shHidActLoad = &shHidActs[loadY][loadX]; float prod[numColors][pixelsPerThread][filtersPerThread]; #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[c][p][f] = 0; } } } const int mStartX = blockModuleStartX; const int mStartY = blockModuleStartY; const int mEndX = min(numModulesX, blockModuleStartX + sumWidth); const int mEndY = min(numModulesY, blockModuleStartY + sumWidth); const bool doWork = mStartY < mEndY && mStartX < mEndX; // if (!doWork) { // hidActs -= // } // if (mStartY == mEndY || 
mStartX == mEndX) { // return; // } // float imPreload[pixelCache * numColors * preloadCases / B_X]; // [12] float haPreload[filtersPerThread * preloadCases / B_Y]; // [8] // if (blockIdx.x != 0 || blockIdx.y !=0) { // return; // } // printf("mStartX: %d, mStartX: %d, mStartX: %d, mStartX: %d\n", mStartX, mStartY, mEndX, mEndY); const int fYOff = (blockPixelOffset + tidx) / filterSize; const int fXOff = (blockPixelOffset + tidx) % filterSize; __shared__ int pxIdxes[B_Y*pixelsPerThread]; // __shared__ int fidx[filtersPerThread * preloadCases / B_Y]; // [8] int m = mStartY * numModulesX + mStartX; int fidx[filtersPerThread * preloadCases / B_Y]; if (doWork) { #pragma unroll for (int y = 0; y < filtersPerThread * preloadCases / B_Y; ++y) { const int fIdx = WA_3_FIDX(y); // if (doWork) { haPreload[y] = tex1Dfetch<float>(hidActs, hidActsOffset + fIdx * numImages * numModules + m * numImages); // } fidx[y] = fIdx * numImages * numModules; } } for (int my = mStartY; my < mEndY; my++) { const int imgLoadModPosY = paddingStart + my * moduleStride; for (int mx = mStartX; mx < mEndX; mx++) { m = my * numModulesX + mx; // __syncthreads(); const int imgLoadModPosX = paddingStart + mx * moduleStride; if (tidx < B_Y * pixelsPerThread) { // const int imgLoadModPosY = paddingStart + my * moduleStride; // const int imgLoadModPosX = paddingStart + mx * moduleStride; const int pxY = (imgLoadModPosY + fYOff); const int pxX = (imgLoadModPosX + fXOff); const int pixIdx = (pxY * imgSizeX + pxX) * imgStride; pxIdxes[tidx] = pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX ? pixIdx : -1; } __syncthreads(); int myNext = my, mxNext = mx, mNext = m; const bool lastModule = my == mEndY - 1 && mx == mEndX - 1; if (!lastModule) { mxNext = mx + 1 == mEndX ? mStartX : mx + 1; myNext = my + (mx + 1 == mEndX); mNext = myNext * numModulesX + mxNext; } for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { const bool lastBatch = caseIdx + preloadCases == numImages; // const float* im = &images[caseIdx + preloadCases + pixIdx]; // const float* ha = &hidActs[caseIdx + preloadCases + m * numImages]; int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages; if (lastBatch) { // ha = &hidActs[mNext * numImages]; hidActsOffset2 = hidActsOffset + mNext * numImages; } #pragma unroll for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) { shHidActs[loadY+y][loadX] = haPreload[y*preloadCases/(B_X*B_Y)]; } /* ================================================================================== * Iteration 0 * ================================================================================== */ #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0; } } #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { const int pxIdx = 0 * B_Y + loadY + y; // pixel idx in filter if (pxIdx + blockPixelOffset < filterPixels) { const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride; if (pixIdx >= 0) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = tex1Dfetch<float>(images, imgOffset + caseIdx + c * imgPixels * imgStride + pixIdx); } } } } __syncthreads(); haPreload[0] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[0]); haPreload[1] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[1]); WA_C3_LOOP(0,0); haPreload[2] = tex1Dfetch<float>(hidActs, 
hidActsOffset2 + fidx[2]); haPreload[3] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[3]); WA_C3_LOOP(0,1); haPreload[4] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[4]); haPreload[5] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[5]); WA_C3_LOOP(0,2); haPreload[6] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[6]); haPreload[7] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[7]); __syncthreads(); } } } if (scale) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][p][f]; } } } } } else { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { // if (threadIdx.x == 3) targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][p][f]; } } } } } } /* * Each block computes weight gradients for B_Y * pixelsPerThread pixels and B_X filters * threadIdx.x determines filter * threadIdx.y determines pixel in filter * * blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum * blockIdx.y determines pixel batch of B_Y * pixelsPerThread * * Number of filters must be divisible by B_X * filtersPerThread * Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false. * * images: (numColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numColors, filterPixels, numFilters) * * B_Y * B_X should be divisible by preloadCases. * preloadCases one of 16, 32. * B_X one of 4, 8, 16, 32 * B_Y arbitrary (satisfying divisibility constraints) * numModules must be divisible by partialSum * pixelsPerThread must be divisible by pixelCache * * After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)... * so the compiler is messing up here somehow. It's unable to optimize that case away. 
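 * This kernel is the plain-pointer twin of conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3_tex
 * above: the control flow is identical, but images and hidActs are read with ordinary global loads
 * instead of tex1Dfetch.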
*/ template <int B_Y, int B_X, int pixelCache, int pixelsPerThread, int filtersPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds> //__launch_bounds__(256,2) __global__ void conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3(float* images, float* hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[pixelCache * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels __shared__ float shHidActs[B_X * filtersPerThread][preloadCases + 1]; // preload preloadCases cases of B_X hidActs const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X*filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; const int blockFilterIdx = B_X * filtersPerThread* (blockIdx.x % numFilterBlocks); // const int moduleStride = (imgSize - filterSize + 1) / numModulesX; const int numModules = numModulesY * numModulesX; const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread; const int imgOffset = loadX; const int hidActsOffset = blockFilterIdx * numImages * numModules + loadX; // images += loadX; // hidActs += blockFilterIdx * numImages * numModules // + loadX; targets += (blockModuleChunkIdx * numFilters) * filterPixels * numColors + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.y * numFilters + threadIdx.x; //float* shImgLoad = &shImages[loadY][loadX]; //float* shHidActLoad = &shHidActs[loadY][loadX]; float prod[numColors][pixelsPerThread][filtersPerThread]; #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[c][p][f] = 0; } } } const int mStartX = blockModuleStartX; const int mStartY = blockModuleStartY; const int mEndX = min(numModulesX, blockModuleStartX + sumWidth); const int mEndY = min(numModulesY, blockModuleStartY + sumWidth); const bool doWork = mStartY < mEndY && mStartX < mEndX; // if (!doWork) { // hidActs -= // } // if (mStartY == mEndY || mStartX == mEndX) { // return; // } // float imPreload[pixelCache * numColors * preloadCases / B_X]; // [12] float haPreload[filtersPerThread * preloadCases / B_Y]; // [8] // if (blockIdx.x != 0 || blockIdx.y !=0) { // return; // } // printf("mStartX: %d, mStartX: %d, mStartX: %d, mStartX: %d\n", mStartX, mStartY, mEndX, mEndY); const int fYOff = (blockPixelOffset + tidx) / filterSize; const int fXOff = (blockPixelOffset + tidx) % filterSize; __shared__ int pxIdxes[B_Y*pixelsPerThread]; // __shared__ int fidx[filtersPerThread * preloadCases / B_Y]; // [8] int m = mStartY * numModulesX + mStartX; int fidx[filtersPerThread * preloadCases / B_Y]; if (doWork) { #pragma unroll for (int y = 
0; y < filtersPerThread * preloadCases / B_Y; ++y) { const int fIdx = WA_3_FIDX(y); // if (doWork) { haPreload[y] = hidActs[hidActsOffset + fIdx * numImages * numModules + m * numImages]; // } fidx[y] = fIdx * numImages * numModules; } } for (int my = mStartY; my < mEndY; my++) { const int imgLoadModPosY = paddingStart + my * moduleStride; for (int mx = mStartX; mx < mEndX; mx++) { m = my * numModulesX + mx; // __syncthreads(); const int imgLoadModPosX = paddingStart + mx * moduleStride; if (tidx < B_Y * pixelsPerThread) { // const int imgLoadModPosY = paddingStart + my * moduleStride; // const int imgLoadModPosX = paddingStart + mx * moduleStride; const int pxY = (imgLoadModPosY + fYOff); const int pxX = (imgLoadModPosX + fXOff); const int pixIdx = (pxY * imgSizeX + pxX) * imgStride; pxIdxes[tidx] = pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX ? pixIdx : -1; } __syncthreads(); int myNext = my, mxNext = mx, mNext = m; const bool lastModule = my == mEndY - 1 && mx == mEndX - 1; if (!lastModule) { mxNext = mx + 1 == mEndX ? mStartX : mx + 1; myNext = my + (mx + 1 == mEndX); mNext = myNext * numModulesX + mxNext; } for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { const bool lastBatch = caseIdx + preloadCases == numImages; // const float* im = &images[caseIdx + preloadCases + pixIdx]; // const float* ha = &hidActs[caseIdx + preloadCases + m * numImages]; int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages; if (lastBatch) { // ha = &hidActs[mNext * numImages]; hidActsOffset2 = hidActsOffset + mNext * numImages; } #pragma unroll for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) { shHidActs[loadY+y][loadX] = haPreload[y*preloadCases/(B_X*B_Y)]; } /* ================================================================================== * Iteration 0 * ================================================================================== */ #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0; } } #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { const int pxIdx = 0 * B_Y + loadY + y; // pixel idx in filter if (pxIdx + blockPixelOffset < filterPixels) { const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride; if (pixIdx >= 0) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = images[imgOffset + caseIdx + c * imgPixels * imgStride + pixIdx]; } } } } __syncthreads(); haPreload[0] = hidActs[hidActsOffset2 + fidx[0]]; haPreload[1] = hidActs[hidActsOffset2 + fidx[1]]; WA_C3_LOOP(0,0); haPreload[2] = hidActs[hidActsOffset2 + fidx[2]]; haPreload[3] = hidActs[hidActsOffset2 + fidx[3]]; WA_C3_LOOP(0,1); haPreload[4] = hidActs[hidActsOffset2 + fidx[4]]; haPreload[5] = hidActs[hidActsOffset2 + fidx[5]]; WA_C3_LOOP(0,2); haPreload[6] = hidActs[hidActsOffset2 + fidx[6]]; haPreload[7] = hidActs[hidActsOffset2 + fidx[7]]; __syncthreads(); } } } if (scale) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][p][f]; } } } } } else { 
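// No accumulation in this branch: each target is simply overwritten with scaleOutputs * prod, so scaleTargets is ignored.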
#pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { // if (threadIdx.x == 3) targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][p][f]; } } } } } } /* * Each block computes weight gradients for B_Y * pixelsPerThread pixels and B_X filters * threadIdx.x determines filter * threadIdx.y determines pixel in filter * * blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum * blockIdx.y determines pixel batch of B_Y * pixelsPerThread * * Number of filters must be divisible by B_X * filtersPerThread * Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false. * * images: (numColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numColors, filterPixels, numFilters) * * B_Y * B_X should be divisible by preloadCases. * preloadCases one of 16, 32. * B_X one of 4, 8, 16, 32 * B_Y arbitrary (satisfying divisibility constraints) * numModules must be divisible by partialSum * pixelsPerThread must be divisible by pixelCache * * After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)... * so the compiler is messing up here somehow. It's unable to optimize that case away. */ template <int B_Y, int B_X, int pixelCache, int pixelsPerThread, int filtersPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds> __launch_bounds__(256,2) __global__ void conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3_tex(cudaTextureObject_t images, cudaTextureObject_t hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[pixelCache * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels __shared__ float shHidActs[B_X * filtersPerThread][preloadCases + 1]; // preload preloadCases cases of B_X hidActs const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X*filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; const int blockFilterIdx = B_X * filtersPerThread* (blockIdx.x % numFilterBlocks); // const int moduleStride = (imgSize - filterSize + 1) / numModulesX; const int numModules = numModulesY * numModulesX; const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread; const int imgOffset = loadX; const int hidActsOffset = blockFilterIdx * numImages * numModules + loadX; // images += loadX; // hidActs += blockFilterIdx * numImages * numModules // + 
loadX; targets += (blockModuleChunkIdx * numFilters) * filterPixels * numColors + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.y * numFilters + threadIdx.x; //float* shImgLoad = &shImages[loadY][loadX]; //float* shHidActLoad = &shHidActs[loadY][loadX]; float prod[numColors][pixelsPerThread][filtersPerThread]; #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[c][p][f] = 0; } } } const int mStartX = blockModuleStartX; const int mStartY = blockModuleStartY; const int mEndX = min(numModulesX, blockModuleStartX + sumWidth); const int mEndY = min(numModulesY, blockModuleStartY + sumWidth); const bool doWork = mStartY < mEndY && mStartX < mEndX; // if (mStartY == mEndY || mStartX == mEndX) { // return; // } // float imPreload[pixelCache * numColors * preloadCases / B_X]; // [12] float haPreload[filtersPerThread * preloadCases / B_Y]; // [6] // if (blockIdx.x != 0 || blockIdx.y !=0) { // return; // } // printf("mStartX: %d, mStartX: %d, mStartX: %d, mStartX: %d\n", mStartX, mStartY, mEndX, mEndY); const int fYOff = (blockPixelOffset + tidx) / filterSize; const int fXOff = (blockPixelOffset + tidx) % filterSize; __shared__ int pxIdxes[B_Y*pixelsPerThread]; // __shared__ int fidx[filtersPerThread * preloadCases / B_Y]; // [6] int m = mStartY * numModulesX + mStartX; int fidx[filtersPerThread * preloadCases / B_Y]; // if (doWork) { #pragma unroll for (int y = 0; y < filtersPerThread * preloadCases / B_Y; ++y) { fidx[y] = WA_3_FIDX(y) * numImages * numModules; if (doWork) { // Not actually necessary, I think haPreload[y] = tex1Dfetch<float>(hidActs, hidActsOffset + fidx[y] + m * numImages); } } // } int mNext = mStartY * numModulesX + mStartX; for (int my = mStartY; my < mEndY; my++) { // const int imgLoadModPosY = paddingStart + my * moduleStride; for (int mx = mStartX; mx < mEndX; mx++) { m = mNext;//my * numModulesX + mx; // __syncthreads(); // const int imgLoadModPosX = paddingStart + mx * moduleStride; if (tidx < B_Y * pixelsPerThread) { const int imgLoadModPosY = paddingStart + my * moduleStride; const int imgLoadModPosX = paddingStart + mx * moduleStride; const int pxY = (imgLoadModPosY + fYOff); const int pxX = (imgLoadModPosX + fXOff); const int pixIdx = (pxY * imgSizeX + pxX) * imgStride; pxIdxes[tidx] = pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX ? pixIdx : -1; } __syncthreads(); const bool lastModule = my == mEndY - 1 && mx == mEndX - 1; mNext = lastModule * m + !lastModule * ((my + (mx + 1 == mEndX)) * numModulesX + (mx + 1 == mEndX ? mStartX : mx + 1)); // if (!lastModule) { // const int mxNext = mx + 1 == mEndX ? 
mStartX : mx + 1; // const int myNext = my + (mx + 1 == mEndX); // mNext = myNext * numModulesX + mxNext; // } for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { const bool lastBatch = caseIdx + preloadCases == numImages; // const float* im = &images[caseIdx + preloadCases + pixIdx]; // const float* ha = hidActs + !lastBatch * (caseIdx + preloadCases + m * numImages) + lastBatch * mNext * numImages; const int hidActsOffset2 = hidActsOffset + !lastBatch * (caseIdx + preloadCases + m * numImages) + lastBatch * mNext * numImages; // if (lastBatch) { // ha = &hidActs[mNext * numImages]; // } #pragma unroll for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) { shHidActs[loadY+y][loadX] = haPreload[y*preloadCases/(B_X*B_Y)]; } /* ================================================================================== * Iteration 0 * ================================================================================== */ #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0; } } } #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) { const int pxIdx = 0 * B_Y + loadY + y; // pixel idx in filter const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride; if (pixIdx >= 0 && pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = tex1Dfetch<float>(images, imgOffset + caseIdx + c * imgPixels * imgStride + pixIdx); } } } } __syncthreads(); haPreload[0] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[0]); haPreload[1] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[1]); haPreload[2] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[2]); haPreload[3] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[3]); haPreload[4] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[4]); haPreload[5] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[5]); WA_C3_LOOP2(0); __syncthreads(); /* ================================================================================== * Iteration 1 * ================================================================================== */ #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) { // const int pxIdx = 2 * B_Y + loadY + y; // pixel idx in filter #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0; } } } #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) { const int pxIdx = 2 * B_Y + loadY + y; // pixel idx in filter const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + 
pxX) * imgStride; if (pixIdx >= 0 && pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = tex1Dfetch<float>(images, imgOffset + caseIdx + c * imgPixels * imgStride + pixIdx); } } } } __syncthreads(); WA_C3_LOOP2(2); __syncthreads(); } } } if (scale) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][p][f]; } } } } } else { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][p][f]; } } } } } } /* * Each block computes weight gradients for B_Y * pixelsPerThread pixels and B_X filters * threadIdx.x determines filter * threadIdx.y determines pixel in filter * * blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum * blockIdx.y determines pixel batch of B_Y * pixelsPerThread * * Number of filters must be divisible by B_X * filtersPerThread * Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false. * * images: (numColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numColors, filterPixels, numFilters) * * B_Y * B_X should be divisible by preloadCases. * preloadCases one of 16, 32. * B_X one of 4, 8, 16, 32 * B_Y arbitrary (satisfying divisibility constraints) * numModules must be divisible by partialSum * pixelsPerThread must be divisible by pixelCache * * After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)... * so the compiler is messing up here somehow. It's unable to optimize that case away. 
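 * As with the pt_2/f_4 pair above, this is the plain-pointer variant of
 * conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3_tex; it differs essentially only in reading
 * images and hidActs through ordinary global loads rather than tex1Dfetch.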
*/ template <int B_Y, int B_X, int pixelCache, int pixelsPerThread, int filtersPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds> __launch_bounds__(256,2) __global__ void conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3(float* images, float* hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[pixelCache * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels __shared__ float shHidActs[B_X * filtersPerThread][preloadCases + 1]; // preload preloadCases cases of B_X hidActs const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X*filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; const int blockFilterIdx = B_X * filtersPerThread* (blockIdx.x % numFilterBlocks); // const int moduleStride = (imgSize - filterSize + 1) / numModulesX; const int numModules = numModulesY * numModulesX; const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread; const int imgOffset = loadX; const int hidActsOffset = blockFilterIdx * numImages * numModules + loadX; // images += loadX; // hidActs += blockFilterIdx * numImages * numModules // + loadX; targets += (blockModuleChunkIdx * numFilters) * filterPixels * numColors + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.y * numFilters + threadIdx.x; //float* shImgLoad = &shImages[loadY][loadX]; //float* shHidActLoad = &shHidActs[loadY][loadX]; float prod[numColors][pixelsPerThread][filtersPerThread]; #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[c][p][f] = 0; } } } const int mStartX = blockModuleStartX; const int mStartY = blockModuleStartY; const int mEndX = min(numModulesX, blockModuleStartX + sumWidth); const int mEndY = min(numModulesY, blockModuleStartY + sumWidth); const bool doWork = mStartY < mEndY && mStartX < mEndX; // if (mStartY == mEndY || mStartX == mEndX) { // return; // } // float imPreload[pixelCache * numColors * preloadCases / B_X]; // [12] float haPreload[filtersPerThread * preloadCases / B_Y]; // [6] // if (blockIdx.x != 0 || blockIdx.y !=0) { // return; // } // printf("mStartX: %d, mStartX: %d, mStartX: %d, mStartX: %d\n", mStartX, mStartY, mEndX, mEndY); const int fYOff = (blockPixelOffset + tidx) / filterSize; const int fXOff = (blockPixelOffset + tidx) % filterSize; __shared__ int pxIdxes[B_Y*pixelsPerThread]; // __shared__ int fidx[filtersPerThread * preloadCases / B_Y]; // [6] int m = mStartY * numModulesX + mStartX; int fidx[filtersPerThread * preloadCases / B_Y]; // if (doWork) { #pragma unroll for (int y = 0; y < filtersPerThread * 
preloadCases / B_Y; ++y) { fidx[y] = WA_3_FIDX(y) * numImages * numModules; if (doWork) { // Not actually necessary, I think haPreload[y] = hidActs[hidActsOffset + fidx[y] + m * numImages]; } } // } int mNext = mStartY * numModulesX + mStartX; for (int my = mStartY; my < mEndY; my++) { // const int imgLoadModPosY = paddingStart + my * moduleStride; for (int mx = mStartX; mx < mEndX; mx++) { m = mNext;//my * numModulesX + mx; // __syncthreads(); // const int imgLoadModPosX = paddingStart + mx * moduleStride; if (tidx < B_Y * pixelsPerThread) { const int imgLoadModPosY = paddingStart + my * moduleStride; const int imgLoadModPosX = paddingStart + mx * moduleStride; const int pxY = (imgLoadModPosY + fYOff); const int pxX = (imgLoadModPosX + fXOff); const int pixIdx = (pxY * imgSizeX + pxX) * imgStride; pxIdxes[tidx] = pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX ? pixIdx : -1; } __syncthreads(); const bool lastModule = my == mEndY - 1 && mx == mEndX - 1; mNext = lastModule * m + !lastModule * ((my + (mx + 1 == mEndX)) * numModulesX + (mx + 1 == mEndX ? mStartX : mx + 1)); // if (!lastModule) { // const int mxNext = mx + 1 == mEndX ? mStartX : mx + 1; // const int myNext = my + (mx + 1 == mEndX); // mNext = myNext * numModulesX + mxNext; // } for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { const bool lastBatch = caseIdx + preloadCases == numImages; // const float* im = &images[caseIdx + preloadCases + pixIdx]; // const float* ha = hidActs + !lastBatch * (caseIdx + preloadCases + m * numImages) + lastBatch * mNext * numImages; const int hidActsOffset2 = hidActsOffset + !lastBatch * (caseIdx + preloadCases + m * numImages) + lastBatch * mNext * numImages; // if (lastBatch) { // ha = &hidActs[mNext * numImages]; // } #pragma unroll for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) { shHidActs[loadY+y][loadX] = haPreload[y*preloadCases/(B_X*B_Y)]; } /* ================================================================================== * Iteration 0 * ================================================================================== */ #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0; } } } #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) { const int pxIdx = 0 * B_Y + loadY + y; // pixel idx in filter const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride; if (pixIdx >= 0 && pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = images[imgOffset + caseIdx + c * imgPixels * imgStride + pixIdx]; } } } } __syncthreads(); haPreload[0] = hidActs[hidActsOffset2 + fidx[0]]; haPreload[1] = hidActs[hidActsOffset2 + fidx[1]]; haPreload[2] = hidActs[hidActsOffset2 + fidx[2]]; haPreload[3] = hidActs[hidActsOffset2 + fidx[3]]; haPreload[4] = hidActs[hidActsOffset2 + fidx[4]]; haPreload[5] = hidActs[hidActsOffset2 + fidx[5]]; WA_C3_LOOP2(0); __syncthreads(); 
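// Iteration 0 above covered the first pixelCache * B_Y filter pixels of this block's window;
// iteration 1 below handles the next pixelCache * B_Y pixels (offset 2 * B_Y) and accumulates
// into prod[c][2 + p][f] via WA_C3_LOOP2(2).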
/* ================================================================================== * Iteration 1 * ================================================================================== */ #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) { // const int pxIdx = 2 * B_Y + loadY + y; // pixel idx in filter #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0; } } } #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) { const int pxIdx = 2 * B_Y + loadY + y; // pixel idx in filter const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride; if (pixIdx >= 0 && pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = images[imgOffset + caseIdx + c * imgPixels * imgStride + pixIdx]; } } } } __syncthreads(); WA_C3_LOOP2(2); __syncthreads(); } } } if (scale) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][p][f]; } } } } } else { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][p][f]; } } } } } } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters) */ template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale> __launch_bounds__(128, 4) __global__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16_tex(cudaTextureObject_t images, cudaTextureObject_t hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int numImgColors, const int numGroups, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases __shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X * filtersPerThread); const int 
blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; // const int moduleIdx = partialSum * outputModuleIdx; const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks); const int numModules = numModulesY * numModulesX; const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numFilterColors = numImgColors / numGroups; const int blockPixelOffset = blockIdx.z; // pixel idx in filter const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize; const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread; const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors; const int imgOffset = (imgColorIdx + loadY) * imgPixels * imgStride + loadX; // images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX; const int hidActsOffset = blockFilterIdx * numImages * numModules + loadY * numImages * numModules + loadX; // // hidActs += // blockFilterIdx * numImages * numModules // + loadY * numImages * numModules // + loadX; targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors + (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.x; // if (blockIdx.x != 0 || blockIdx.y != 0 || blockIdx.z != 0) return; const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride)); const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride)); const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride))); const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride))); // if (mStartY == mEndY || mStartX == mEndX) { // return; // } // const bool doWork = mStartY < mEndY && mStartX < mEndX; float* shHidActLoad = &shHidActs[loadY][loadX]; float* shImgLoad = &shImages[loadY][loadX]; float imPreload[preloadCases*colorsPerThread/B_X]; // [8] float haPreload[preloadCases*filtersPerThread/B_Y]; // [8] float prod[filtersPerThread][colorsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[f][c] = 0; } } int pixIdx, pixIdxNext, m, mNext; conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( mStartY, mStartX, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdx, m); #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { // It's bizarre, but this is the fastest way I've found to get it not to load nonexistent pixels. // All other ways cause crazy excessive register usage. const int idx = (mStartY < mEndY && mStartX < mEndX) * (0 + y * imgPixels * imgStride + pixIdx); imPreload[y * preloadCases/(B_X * B_Y)] = tex1Dfetch<float>(images, imgOffset + idx); } #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { // Almost certainly not necessary here. 
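// Multiplying by the (mStartY < mEndY && mStartX < mEndX) predicate forces idx to 0 for blocks
// that have no modules to process, so the preload still reads a valid address rather than a
// possibly out-of-range one.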
const int idx = (mStartY < mEndY && mStartX < mEndX) * (0 + y * numImages * numModules + m * numImages); haPreload[y * preloadCases / (B_X * B_Y)] = tex1Dfetch<float>(hidActs, hidActsOffset + idx); } for (int my = mStartY; my < mEndY; my++) { for (int mx = mStartX; mx < mEndX; mx++) { int myNext = my, mxNext = mx; const bool lastModule = my == mEndY - 1 && mx == mEndX - 1; if (!lastModule) { mxNext = mx + 1 == mEndX ? mStartX : mx + 1; myNext = my + (mx + 1 == mEndX); } conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( myNext, mxNext, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdxNext, mNext); for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { shImgLoad[(y) * preloadCases] = imPreload[y * preloadCases / (B_X * B_Y)]; } // const float* im = &images[caseIdx + preloadCases + pixIdx]; // const float* ha = &hidActs[caseIdx + preloadCases + m * numImages]; int imgOffset2 = imgOffset + caseIdx + preloadCases + pixIdx; int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages; if (caseIdx + preloadCases == numImages) { pixIdx = pixIdxNext; m = mNext; imgOffset2 = imgOffset + pixIdxNext; hidActsOffset2 = hidActsOffset + mNext * numImages; } #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { shHidActLoad[y * (preloadCases + 1)] = haPreload[y * preloadCases / (B_X * B_Y)]; } __syncthreads(); #pragma unroll for (int z = 0; z < 8; ++z) { WA_IMLOAD_TX(z); WA_LOOP2(z); } #pragma unroll for (int z = 0; z < 8; ++z) { WA_HALOAD_TX(z); WA_LOOP2(z+8); } __syncthreads(); } } } if (scale) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[f][c]; } } } else { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[f][c]; } } } } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters) */ template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale> __launch_bounds__(128, 4) __global__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16(float* images, float* hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int numImgColors, const int numGroups, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases __shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X * filtersPerThread); 
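// blockIdx.x jointly encodes a module chunk (a sumWidth x sumWidth group of modules) and a batch of
// B_X * filtersPerThread filters; blockIdx.y selects a group of B_Y * colorsPerThread filter colors,
// and blockIdx.z selects the single filter pixel this block handles.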
const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; // const int moduleIdx = partialSum * outputModuleIdx; const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks); const int numModules = numModulesY * numModulesX; const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numFilterColors = numImgColors / numGroups; const int blockPixelOffset = blockIdx.z; // pixel idx in filter const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize; const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread; const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors; const int imgOffset = (imgColorIdx + loadY) * imgPixels * imgStride + loadX; // images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX; const int hidActsOffset = blockFilterIdx * numImages * numModules + loadY * numImages * numModules + loadX; // // hidActs += // blockFilterIdx * numImages * numModules // + loadY * numImages * numModules // + loadX; targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors + (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.x; // if (blockIdx.x != 0 || blockIdx.y != 0 || blockIdx.z != 0) return; const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride)); const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride)); const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride))); const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride))); // if (mStartY == mEndY || mStartX == mEndX) { // return; // } // const bool doWork = mStartY < mEndY && mStartX < mEndX; float* shHidActLoad = &shHidActs[loadY][loadX]; float* shImgLoad = &shImages[loadY][loadX]; float imPreload[preloadCases*colorsPerThread/B_X]; // [8] float haPreload[preloadCases*filtersPerThread/B_Y]; // [8] float prod[filtersPerThread][colorsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[f][c] = 0; } } int pixIdx, pixIdxNext, m, mNext; conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( mStartY, mStartX, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdx, m); #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { // It's bizarre, but this is the fastest way I've found to get it not to load nonexistent pixels. // All other ways cause crazy excessive register usage. const int idx = (mStartY < mEndY && mStartX < mEndX) * (0 + y * imgPixels * imgStride + pixIdx); imPreload[y * preloadCases/(B_X * B_Y)] = images[imgOffset + idx]; } #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { // Almost certainly not necessary here. 
const int idx = (mStartY < mEndY && mStartX < mEndX) * (0 + y * numImages * numModules + m * numImages); haPreload[y * preloadCases / (B_X * B_Y)] = hidActs[hidActsOffset + idx]; } for (int my = mStartY; my < mEndY; my++) { for (int mx = mStartX; mx < mEndX; mx++) { int myNext = my, mxNext = mx; const bool lastModule = my == mEndY - 1 && mx == mEndX - 1; if (!lastModule) { mxNext = mx + 1 == mEndX ? mStartX : mx + 1; myNext = my + (mx + 1 == mEndX); } conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( myNext, mxNext, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdxNext, mNext); for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { shImgLoad[(y) * preloadCases] = imPreload[y * preloadCases / (B_X * B_Y)]; } // const float* im = &images[caseIdx + preloadCases + pixIdx]; // const float* ha = &hidActs[caseIdx + preloadCases + m * numImages]; int imgOffset2 = imgOffset + caseIdx + preloadCases + pixIdx; int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages; if (caseIdx + preloadCases == numImages) { pixIdx = pixIdxNext; m = mNext; imgOffset2 = imgOffset + pixIdxNext; hidActsOffset2 = hidActsOffset + mNext * numImages; } #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { shHidActLoad[y * (preloadCases + 1)] = haPreload[y * preloadCases / (B_X * B_Y)]; } __syncthreads(); #pragma unroll for (int z = 0; z < 8; ++z) { WA_IMLOAD_2(z); WA_LOOP2(z); } #pragma unroll for (int z = 0; z < 8; ++z) { WA_HALOAD_2(z); WA_LOOP2(z+8); } __syncthreads(); } } } if (scale) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[f][c]; } } } else { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[f][c]; } } } } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters) */ template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale> __launch_bounds__(256, 2) __global__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32_tex(cudaTextureObject_t images, cudaTextureObject_t hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int numImgColors, const int numGroups, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases __shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X * 
filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; // const int moduleIdx = partialSum * outputModuleIdx; const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks); const int numModules = numModulesY * numModulesX; const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numFilterColors = numImgColors / numGroups; const int blockPixelOffset = blockIdx.z; // pixel idx in filter const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize; const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread; const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors; const int imgOffset = (imgColorIdx + loadY) * imgPixels * imgStride + loadX; const int hidActsOffset = blockFilterIdx * numImages * numModules + loadY * numImages * numModules + loadX; // images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX; // // hidActs += // blockFilterIdx * numImages * numModules // + loadY * numImages * numModules // + loadX; targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors + (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.x; const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride)); const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride)); const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride))); const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride))); const bool doWork = mStartY < mEndY && mStartX < mEndX; float* shHidActLoad = &shHidActs[loadY][loadX]; float* shImgLoad = &shImages[loadY][loadX]; float imPreload[preloadCases*colorsPerThread/B_X]; // [6] float haPreload[preloadCases*filtersPerThread/B_Y]; // [16] float prod[filtersPerThread][colorsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[f][c] = 0; } } int pixIdx, pixIdxNext, m, mNext; conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( mStartY, mStartX, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdx, m); if (doWork) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { imPreload[y * preloadCases/(B_X * B_Y)] = tex1Dfetch<float>(images, imgOffset + y * imgPixels * imgStride + pixIdx); } #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { haPreload[y * preloadCases / (B_X * B_Y)] = tex1Dfetch<float>(hidActs, hidActsOffset + y * numImages * numModules + m * numImages); } } for (int my = mStartY; my < mEndY; my++) { for (int mx = mStartX; mx < mEndX; mx++) { int myNext = my, mxNext = mx; const bool lastModule = my == mEndY - 1 && mx == mEndX - 1; if (!lastModule) { mxNext = mx + 1 == mEndX ? 
mStartX : mx + 1; myNext = my + (mx + 1 == mEndX); } conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( myNext, mxNext, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdxNext, mNext); for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { shImgLoad[(y) * preloadCases] = imPreload[y * preloadCases / (B_X * B_Y)]; } #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { shHidActLoad[y * (preloadCases + 1)] = haPreload[y * preloadCases / (B_X * B_Y)]; } __syncthreads(); // const float* im = &images[caseIdx + preloadCases + pixIdx]; // const float* ha = &hidActs[caseIdx + preloadCases + m * numImages]; int imgOffset2 = imgOffset + caseIdx + preloadCases + pixIdx; int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages; if (caseIdx + preloadCases == numImages) { pixIdx = pixIdxNext; m = mNext; imgOffset2 = imgOffset + pixIdxNext; hidActsOffset2 = hidActsOffset + mNext * numImages; } WA_LOOP(0); WA_LOOP(1); WA_LOOP(2); WA_LOOP(3); WA_LOOP(4); WA_LOOP(5); WA_IMLOAD_TX(0); WA_LOOP(6); WA_IMLOAD_TX(1); WA_LOOP(7); WA_IMLOAD_TX(2); WA_LOOP(8); WA_IMLOAD_TX(3); WA_LOOP(9); WA_IMLOAD_TX(4); WA_LOOP(10); WA_IMLOAD_TX(5); WA_LOOP(11); WA_HALOAD_TX(0); WA_LOOP(12); WA_HALOAD_TX(1); WA_LOOP(13); WA_HALOAD_TX(2); WA_LOOP(14); WA_HALOAD_TX(3); WA_LOOP(15); WA_HALOAD_TX(4); WA_LOOP(16); WA_HALOAD_TX(5); WA_LOOP(17); WA_HALOAD_TX(6); WA_LOOP(18); WA_HALOAD_TX(7); WA_LOOP(19); WA_HALOAD_TX(8); WA_LOOP(20); WA_HALOAD_TX(9); WA_LOOP(21); WA_HALOAD_TX(10); WA_LOOP(22); WA_HALOAD_TX(11); WA_LOOP(23); WA_HALOAD_TX(12); WA_LOOP(24); WA_HALOAD_TX(13); WA_LOOP(25); WA_HALOAD_TX(14); WA_LOOP(26); WA_HALOAD_TX(15); WA_LOOP(27); WA_LOOP(28); WA_LOOP(29); WA_LOOP(30); WA_LOOP(31); __syncthreads(); } } } if (scale) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[f][c]; } } } else { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[f][c]; } } } } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters) */ template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale> __launch_bounds__(256, 2) __global__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32(float* images, float* hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int numImgColors, const int numGroups, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases __shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / 
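    // Cooperative-load indexing: the B_X*B_Y threads are re-viewed as a
    // (B_X*B_Y / preloadCases) x preloadCases grid; loadY selects which row of the shared
    // tiles a thread helps fill, loadX which of the preloadCases consecutive images it
    // reads. With the layout documented above, image element (c, y, x, i) should sit at
    // linear offset (c * imgPixels + y * imgSizeX + x) * imgStride + i, which is what
    // imgOffset plus the pixIdx produced by the setCoords helper appears to implement.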
preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X * filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; // const int moduleIdx = partialSum * outputModuleIdx; const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks); const int numModules = numModulesY * numModulesX; const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numFilterColors = numImgColors / numGroups; const int blockPixelOffset = blockIdx.z; // pixel idx in filter const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize; const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread; const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors; const int imgOffset = (imgColorIdx + loadY) * imgPixels * imgStride + loadX; const int hidActsOffset = blockFilterIdx * numImages * numModules + loadY * numImages * numModules + loadX; // images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX; // // hidActs += // blockFilterIdx * numImages * numModules // + loadY * numImages * numModules // + loadX; targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors + (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.x; const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride)); const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride)); const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride))); const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride))); const bool doWork = mStartY < mEndY && mStartX < mEndX; float* shHidActLoad = &shHidActs[loadY][loadX]; float* shImgLoad = &shImages[loadY][loadX]; float imPreload[preloadCases*colorsPerThread/B_X]; // [6] float haPreload[preloadCases*filtersPerThread/B_Y]; // [16] float prod[filtersPerThread][colorsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[f][c] = 0; } } int pixIdx, pixIdxNext, m, mNext; conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( mStartY, mStartX, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdx, m); if (doWork) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { imPreload[y * preloadCases/(B_X * B_Y)] = images[imgOffset + y * imgPixels * imgStride + pixIdx]; } #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { haPreload[y * preloadCases / (B_X * B_Y)] = hidActs[hidActsOffset + y * numImages * numModules + m * numImages]; } } for (int my = mStartY; my < mEndY; my++) { for (int mx = mStartX; mx < mEndX; mx++) { int myNext = my, mxNext = mx; const 
bool lastModule = my == mEndY - 1 && mx == mEndX - 1; if (!lastModule) { mxNext = mx + 1 == mEndX ? mStartX : mx + 1; myNext = my + (mx + 1 == mEndX); } conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( myNext, mxNext, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdxNext, mNext); for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { shImgLoad[(y) * preloadCases] = imPreload[y * preloadCases / (B_X * B_Y)]; } #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { shHidActLoad[y * (preloadCases + 1)] = haPreload[y * preloadCases / (B_X * B_Y)]; } __syncthreads(); // const float* im = &images[caseIdx + preloadCases + pixIdx]; // const float* ha = &hidActs[caseIdx + preloadCases + m * numImages]; int imgOffset2 = imgOffset + caseIdx + preloadCases + pixIdx; int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages; if (caseIdx + preloadCases == numImages) { pixIdx = pixIdxNext; m = mNext; imgOffset2 = imgOffset + pixIdxNext; hidActsOffset2 = hidActsOffset + mNext * numImages; } WA_LOOP(0); WA_LOOP(1); WA_LOOP(2); WA_LOOP(3); WA_LOOP(4); WA_LOOP(5); WA_IMLOAD_2(0); WA_LOOP(6); WA_IMLOAD_2(1); WA_LOOP(7); WA_IMLOAD_2(2); WA_LOOP(8); WA_IMLOAD_2(3); WA_LOOP(9); WA_IMLOAD_2(4); WA_LOOP(10); WA_IMLOAD_2(5); WA_LOOP(11); WA_HALOAD_2(0); WA_LOOP(12); WA_HALOAD_2(1); WA_LOOP(13); WA_HALOAD_2(2); WA_LOOP(14); WA_HALOAD_2(3); WA_LOOP(15); WA_HALOAD_2(4); WA_LOOP(16); WA_HALOAD_2(5); WA_LOOP(17); WA_HALOAD_2(6); WA_LOOP(18); WA_HALOAD_2(7); WA_LOOP(19); WA_HALOAD_2(8); WA_LOOP(20); WA_HALOAD_2(9); WA_LOOP(21); WA_HALOAD_2(10); WA_LOOP(22); WA_HALOAD_2(11); WA_LOOP(23); WA_HALOAD_2(12); WA_LOOP(24); WA_HALOAD_2(13); WA_LOOP(25); WA_HALOAD_2(14); WA_LOOP(26); WA_HALOAD_2(15); WA_LOOP(27); WA_LOOP(28); WA_LOOP(29); WA_LOOP(30); WA_LOOP(31); __syncthreads(); } } } if (scale) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[f][c]; } } } else { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[f][c]; } } } } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters) */ template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale> __launch_bounds__(256, 2) __global__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_tex(cudaTextureObject_t images, cudaTextureObject_t hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int numImgColors, const int numGroups, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases __shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload 
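    // Shared-memory staging: shImages holds a (colorsPerThread*B_Y) x preloadCases tile of
    // image values and shHidActs a (filtersPerThread*B_X) x (preloadCases + 1) tile of
    // hidden activations; the extra +1 column pads the row stride, presumably to avoid
    // shared-memory bank conflicts when the tiles are read back in the accumulation loop.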
preloadCases cases of B_X hidacts const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X * filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; // const int moduleIdx = partialSum * outputModuleIdx; const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks); const int numModules = numModulesY * numModulesX; const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numFilterColors = numImgColors / numGroups; const int blockPixelOffset = blockIdx.z; // pixel idx in filter const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize; const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread; const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors; const int imgOffset = (imgColorIdx + loadY) * imgPixels * imgStride + loadX; // images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX; const int hidActsOffset = blockFilterIdx * numImages * numModules + loadY * numImages * numModules + loadX; // // hidActs += // blockFilterIdx * numImages * numModules // + loadY * numImages * numModules // + loadX; targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors + (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.x; // if (blockIdx.x != 0 || blockIdx.y != 0 || blockIdx.z != 0) return; const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride)); const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride)); const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride))); const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride))); const bool doWork = mStartY < mEndY && mStartX < mEndX; float* shHidActLoad = &shHidActs[loadY][loadX]; float* shImgLoad = &shImages[loadY][loadX]; float imPreload[preloadCases*colorsPerThread/B_X]; // [4] float haPreload[preloadCases*filtersPerThread/B_Y]; // [8] float prod[filtersPerThread][colorsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[f][c] = 0; } } int pixIdx, pixIdxNext, m, mNext; conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( mStartY, mStartX, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdx, m); if (doWork && loadY < B_Y * colorsPerThread) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { imPreload[y * preloadCases/(B_X * B_Y)] = tex1Dfetch<float>(images, imgOffset + y * imgPixels * imgStride + pixIdx); } } if (doWork && loadY < B_X * filtersPerThread) { #pragma unroll for (int y = 0; y < B_X * 
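    // Initial preload: before the module loop starts, each participating thread (the
    // loadY < ... guards skip threads that have no tile row of their own to fill) stages
    // its first slab of preloadCases values into imPreload / haPreload straight from the
    // texture objects, so the first shared-memory tile can be stored without waiting on
    // global memory inside the loop.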
filtersPerThread; y += (B_X * B_Y) / preloadCases) { haPreload[y * preloadCases / (B_X * B_Y)] = tex1Dfetch<float>(hidActs, hidActsOffset + y * numImages * numModules + m * numImages); } } for (int my = mStartY; my < mEndY; my++) { for (int mx = mStartX; mx < mEndX; mx++) { int myNext = my, mxNext = mx; const bool lastModule = my == mEndY - 1 && mx == mEndX - 1; if (!lastModule) { mxNext = mx + 1 == mEndX ? mStartX : mx + 1; myNext = my + (mx + 1 == mEndX); } conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( myNext, mxNext, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdxNext, mNext); for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { // const float* im = &images[caseIdx + preloadCases + pixIdx]; int imgOffset2 = imgOffset + caseIdx + preloadCases + pixIdx; int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages; // const float* ha = &hidActs[caseIdx + preloadCases + m * numImages]; if (caseIdx + preloadCases == numImages) { pixIdx = pixIdxNext; m = mNext; // im = &images[pixIdxNext]; imgOffset2 = imgOffset + pixIdxNext; hidActsOffset2 = hidActsOffset + mNext * numImages; // ha = &hidActs[mNext * numImages]; } if (loadY < B_Y * colorsPerThread) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { shImgLoad[(y) * preloadCases] = imPreload[y * preloadCases / (B_X * B_Y)]; } } if (loadY < B_X * filtersPerThread) { #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { shHidActLoad[y * (preloadCases + 1)] = haPreload[y * preloadCases / (B_X * B_Y)]; } } __syncthreads(); WA_LOOP(0); WA_IMLOAD_TX(0); WA_LOOP(1); WA_IMLOAD_TX(1); WA_LOOP(2); WA_IMLOAD_TX(2); WA_LOOP(3); WA_IMLOAD_TX(3); WA_LOOP(4); WA_HALOAD_TX(0); WA_LOOP(5); WA_HALOAD_TX(1); WA_LOOP(6); WA_HALOAD_TX(2); WA_LOOP(7); WA_HALOAD_TX(3); WA_LOOP(8); WA_HALOAD_TX(4); WA_LOOP(9); WA_HALOAD_TX(5); WA_LOOP(10); WA_HALOAD_TX(6); WA_LOOP(11); WA_HALOAD_TX(7); WA_LOOP(12); WA_LOOP(13); WA_LOOP(14); WA_LOOP(15); __syncthreads(); } } } if (scale) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[f][c]; } } } else { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[f][c]; } } } } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters) */ template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale> __launch_bounds__(256, 2) __global__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16(float* images, float* hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int numImgColors, const int numGroups, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases __shared__ float 
shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X * filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; // const int moduleIdx = partialSum * outputModuleIdx; const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks); const int numModules = numModulesY * numModulesX; const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numFilterColors = numImgColors / numGroups; const int blockPixelOffset = blockIdx.z; // pixel idx in filter const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize; const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread; const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors; const int imgOffset = (imgColorIdx + loadY) * imgPixels * imgStride + loadX; // images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX; const int hidActsOffset = blockFilterIdx * numImages * numModules + loadY * numImages * numModules + loadX; // // hidActs += // blockFilterIdx * numImages * numModules // + loadY * numImages * numModules // + loadX; targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors + (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.x; // if (blockIdx.x != 0 || blockIdx.y != 0 || blockIdx.z != 0) return; const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride)); const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride)); const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride))); const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride))); const bool doWork = mStartY < mEndY && mStartX < mEndX; float* shHidActLoad = &shHidActs[loadY][loadX]; float* shImgLoad = &shImages[loadY][loadX]; float imPreload[preloadCases*colorsPerThread/B_X]; // [4] float haPreload[preloadCases*filtersPerThread/B_Y]; // [8] float prod[filtersPerThread][colorsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[f][c] = 0; } } int pixIdx, pixIdxNext, m, mNext; conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( mStartY, mStartX, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdx, m); if (doWork && loadY < B_Y * colorsPerThread) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { imPreload[y * preloadCases/(B_X * B_Y)] = images[imgOffset + y * imgPixels * imgStride + pixIdx]; } } if (doWork && loadY < B_X * filtersPerThread) { #pragma 
unroll
        for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
            haPreload[y * preloadCases / (B_X * B_Y)] = hidActs[hidActsOffset + y * numImages * numModules + m * numImages];
        }
    }

    for (int my = mStartY; my < mEndY; my++) {
        for (int mx = mStartX; mx < mEndX; mx++) {
            int myNext = my, mxNext = mx;
            const bool lastModule = my == mEndY - 1 && mx == mEndX - 1;

            if (!lastModule) {
                mxNext = mx + 1 == mEndX ? mStartX : mx + 1;
                myNext = my + (mx + 1 == mEndX);
            }

            conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords(
                    myNext, mxNext, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride,
                    pixIdxNext, mNext);

            for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
                // const float* im = &images[caseIdx + preloadCases + pixIdx];
                int imgOffset2 = imgOffset + caseIdx + preloadCases + pixIdx;
                int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages;
                // const float* ha = &hidActs[caseIdx + preloadCases + m * numImages];

                if (caseIdx + preloadCases == numImages) {
                    pixIdx = pixIdxNext;
                    m = mNext;
                    // im = &images[pixIdxNext];
                    imgOffset2 = imgOffset + pixIdxNext;
                    hidActsOffset2 = hidActsOffset + mNext * numImages;
                    // ha = &hidActs[mNext * numImages];
                }

                if (loadY < B_Y * colorsPerThread) {
                    #pragma unroll
                    for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
                        shImgLoad[(y) * preloadCases] = imPreload[y * preloadCases / (B_X * B_Y)];
                    }
                }

                if (loadY < B_X * filtersPerThread) {
                    #pragma unroll
                    for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
                        shHidActLoad[y * (preloadCases + 1)] = haPreload[y * preloadCases / (B_X * B_Y)];
                    }
                }

                __syncthreads();

                WA_LOOP(0); WA_IMLOAD_2(0);
                WA_LOOP(1); WA_IMLOAD_2(1);
                WA_LOOP(2); WA_IMLOAD_2(2);
                WA_LOOP(3); WA_IMLOAD_2(3);
                WA_LOOP(4); WA_HALOAD_2(0);
                WA_LOOP(5); WA_HALOAD_2(1);
                WA_LOOP(6); WA_HALOAD_2(2);
                WA_LOOP(7); WA_HALOAD_2(3);
                WA_LOOP(8); WA_HALOAD_2(4);
                WA_LOOP(9); WA_HALOAD_2(5);
                WA_LOOP(10); WA_HALOAD_2(6);
                WA_LOOP(11); WA_HALOAD_2(7);
                WA_LOOP(12);
                WA_LOOP(13);
                WA_LOOP(14);
                WA_LOOP(15);

                __syncthreads();
            }
        }
    }

    if (scale) {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            #pragma unroll
            for (int f = 0; f < filtersPerThread; f++) {
                targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[f][c];
            }
        }
    } else {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            #pragma unroll
            for (int f = 0; f < filtersPerThread; f++) {
                targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[f][c];
            }
        }
    }
}

/*
 * images:   (numImgColors, imgSizeY, imgSizeX, numImages), with stride given
 * hidActs:  (numFilters, numModules, numImages)
 *
 * targets:  (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters)
 *
 * TODO: you can get a slight speed boost for local non-convolutional units by writing special
 * routines for partialSum = 1. But I dunno if the code duplication is worth it...
 *
 * Note: all of these convolution routines are optimized for the case when
 * the number of images (i.e. the minibatch size) is a multiple of 128.
 * Other batch sizes will work, but I made no attempt whatsoever
 * to make them work fast.
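 *
 * For reference, a rough usage sketch. It assumes cutorch-style THCudaTensor buffers that
 * are already allocated and filled on the GPU with the layouts described above; the caller
 * variable names (images, hidActs, weightGrads) and the concrete sizes are illustrative
 * only. With a minibatch of 128 RGB images of size 32x32, 64 filters of size 5x5,
 * stride 1, padding -2 (so numModulesY = numModulesX = 32) and a single summation chunk
 * (sumWidth = 32):
 *
 *     // images: (3*32*32) x 128, hidActs: (64*32*32) x 128; weightGrads is resized inside
 *     _weightActs(images, hidActs, weightGrads,
 *                 32, 32, 32, 5, -2, 1, 3, 1, 32, 0.f, 1.f);
 *
 * For that configuration the dispatch below takes the numFilterColors == 3,
 * numFilters % 64 == 0 path (conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3[_tex])
 * with 16x16 thread blocks and preloadCases = 32, and weightGrads comes back as a
 * (1 * 3 * 25) x 64 tensor.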
 */
void _weightActs(THCudaTensor* images, THCudaTensor* hidActs, THCudaTensor* targets,
                 int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride,
                 int numImgColors, int numGroups, int sumWidth, float scaleTargets, float scaleOutput) {
    int numFilterColors = numImgColors / numGroups;
    int imgStride = images->stride[0];
    int numImages = images->size[1];
    int imgPixels = images->size[0] / numImgColors;
    int imgSizeX = imgPixels / imgSizeY;
    int numModules = numModulesY * numModulesX;
    int numFilters = hidActs->size[0] / numModules;
    int numFiltersPerGroup = numFilters / numGroups;

    THAssert(numImgColors % numGroups == 0);
    THAssert(numFilters % (16*numGroups) == 0);
    THAssert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 16 == 0)));
    THAssert(numGroups == 1 || numFilterColors % 16 == 0);
    THAssert(imgSizeY * imgSizeX == imgPixels);
    THAssert(images->size[0] == imgPixels * numImgColors);

    int filterPixels = filterSize * filterSize;
    int outputModuleChunksX = DIVUP(numModulesX, sumWidth);
    int outputModuleChunksY = DIVUP(numModulesY, sumWidth);
    int outputModuleChunks = outputModuleChunksX * outputModuleChunksY;

    // partialSum = partialSum == 0 ? numModules : partialSum;
    // THAssert(numModules % partialSum == 0);

    THAssert(hidActs->size[1] == numImages);

    // These routines don't handle the case when only part of the image is visited in the convolution
    THAssert(paddingStart <= 0);
    THAssert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX);
    THAssert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
    THAssert(moduleStride <= filterSize);

    THAssert(numModules * numFilters == hidActs->size[0]);
    THAssert(THCudaTensor_isContiguous(hidActs));
    THAssert(THCudaTensor_isContiguous(targets));

    int preloadCases = 32;

    dim3 blocks, threads;
    int bx, by;
    int pixelsPerThread, filtersPerThread, colorsPerThread;
    // Worth playing with these parameters to find best values for your problem.
    // These values work relatively well, but not optimal for all problems.
    if (numFilterColors > 3) {
        filtersPerThread = numFiltersPerGroup % 64 == 0 ? 4 : numFiltersPerGroup % 32 == 0 ? 2 : 1;
        colorsPerThread = numFilterColors % 64 == 0 ? 8 : numFilterColors % 48 == 0 ? 6 : numFilterColors % 32 == 0 ? 8 : 4;
        by = (numFilterColors / colorsPerThread) % 8 == 0 ? 8 : 4;
        bx = numFiltersPerGroup % 128 == 0 ? 32 : 16;
        preloadCases = filtersPerThread * colorsPerThread < 32 ? 32 : 16;
        blocks = dim3(outputModuleChunks*(numFilters/(bx*filtersPerThread)), numFilterColors / (by*colorsPerThread), filterPixels);
        THAssert(numFilterColors % (by*colorsPerThread) == 0);
    } else { // This is ugly but it's nice to spell it out clearly
        THAssert(numGroups == 1); // Just for sanity
        // NOTE: these things are only optimized for colors = 3. I didn't really test other cases.
        if (numFilters % 64 == 0) { // TODO: having a separate case for 128 would make things faster, but I probably don't care about 128
            filtersPerThread = 4;
            pixelsPerThread = 2;
            by = 16;
            bx = 16;
            preloadCases = 32;
        } else if (numFilters % 48 == 0) {
            filtersPerThread = 3;
            pixelsPerThread = 4;
            by = 16;
            bx = 16;
            preloadCases = 32;
        } else if (numFilters % 32 == 0) {
            filtersPerThread = 2;
            pixelsPerThread = 2;
            by = 8;
            bx = 16;
            preloadCases = 16;
        } else { // This case is completely untested. It might be really slow. But no time now.
filtersPerThread = 1; pixelsPerThread = 16; by = 16; bx = 16; preloadCases = 32; } blocks = dim3(outputModuleChunks*(numFilters/(bx*filtersPerThread)), DIVUP(filterPixels, by*pixelsPerThread)); } THAssert((by * bx) % preloadCases == 0); THAssert(numFilters % (bx * filtersPerThread) == 0); threads = dim3(bx, by); bool checkCaseBounds = numImages % preloadCases != 0; bool scale = scaleTargets != 0; if (!scale) { THCudaTensor_resize2d(targets, outputModuleChunks * numFilterColors*filterPixels, numFilters); } else { THAssert(targets->size[0] == outputModuleChunks * numFilterColors*filterPixels); THAssert(targets->size[1] == numFilters); } if (scale == false) { if (checkCaseBounds == false) { if (numFilterColors > 3) { if (numFilterColors % 64 == 0) { if (numFiltersPerGroup % 128 == 0) { if ((THCudaTensor_nElement(images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(hidActs)*4 < TEXTURE_SIZE_MAX)) { cudaTextureObject_t texImages = THCudaTensor_getTextureObject(images); cudaTextureObject_t texHidActs = THCudaTensor_getTextureObject(hidActs); cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_tex< 8, 32, 4, 8, 16, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_tex< 8, 32, 4, 8, 16, false ><<<blocks, threads, 0>>>(texImages, texHidActs, THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); checkCudaErrors(cudaDestroyTextureObject(texImages)); checkCudaErrors(cudaDestroyTextureObject(texHidActs)); } else { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16< 8, 32, 4, 8, 16, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16< 8, 32, 4, 8, 16, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFiltersPerGroup % 64 == 0) { if ((THCudaTensor_nElement(images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(hidActs)*4 < TEXTURE_SIZE_MAX)) { cudaTextureObject_t texImages = THCudaTensor_getTextureObject(images); cudaTextureObject_t texHidActs = THCudaTensor_getTextureObject(hidActs); cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16_tex< 8, 16, 4, 8, 16, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16_tex< 8, 16, 4, 8, 16, false ><<<blocks, threads, 0>>>(texImages, texHidActs, THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); checkCudaErrors(cudaDestroyTextureObject(texImages)); checkCudaErrors(cudaDestroyTextureObject(texHidActs)); } else { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16< 8, 16, 4, 8, 16, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16< 8, 16, 4, 8, 16, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, 
imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numFiltersPerGroup % 128 == 0) { if ((THCudaTensor_nElement(images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(hidActs)*4 < TEXTURE_SIZE_MAX)) { cudaTextureObject_t texImages = THCudaTensor_getTextureObject(images); cudaTextureObject_t texHidActs = THCudaTensor_getTextureObject(hidActs); cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32_tex< 8, 32, 4, 6, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32_tex< 8, 32, 4, 6, 32, false ><<<blocks, threads, 0>>>(texImages, texHidActs, THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); checkCudaErrors(cudaDestroyTextureObject(texImages)); checkCudaErrors(cudaDestroyTextureObject(texHidActs)); } else { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32< 8, 32, 4, 6, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32< 8, 32, 4, 6, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, 
scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, 
imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numFiltersPerGroup % 64 == 0) { if ((THCudaTensor_nElement(images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(hidActs)*4 < TEXTURE_SIZE_MAX)) { cudaTextureObject_t texImages = THCudaTensor_getTextureObject(images); cudaTextureObject_t texHidActs = THCudaTensor_getTextureObject(hidActs); cudaFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3_tex < 16, 16, 2, 2, 4, 32, 3, false, false >, cudaFuncCachePreferShared); conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3_tex < 16, 16, 2, 2, 4, 32, 3, false, false ><<<blocks, threads, 0>>>(texImages, texHidActs, THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); checkCudaErrors(cudaDestroyTextureObject(texImages)); checkCudaErrors(cudaDestroyTextureObject(texHidActs)); } else { cudaFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3 < 16, 16, 2, 2, 4, 32, 3, false, false >, cudaFuncCachePreferShared); conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3 < 16, 16, 2, 2, 4, 32, 3, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFiltersPerGroup % 48 == 0) { if ((THCudaTensor_nElement(images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(hidActs)*4 < TEXTURE_SIZE_MAX)) { cudaTextureObject_t texImages = THCudaTensor_getTextureObject(images); cudaTextureObject_t texHidActs = THCudaTensor_getTextureObject(hidActs); cudaFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3_tex < 16, 16, 2, 4, 3, 32, 3, false, false >, cudaFuncCachePreferShared); conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3_tex < 16, 16, 2, 4, 3, 32, 3, false, false ><<<blocks, threads, 0>>>(texImages, texHidActs, THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); checkCudaErrors(cudaDestroyTextureObject(texImages)); checkCudaErrors(cudaDestroyTextureObject(texHidActs)); } else { cudaFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3 < 16, 16, 2, 4, 3, 32, 3, false, false >, 
cudaFuncCachePreferShared); conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3 < 16, 16, 2, 4, 3, 32, 3, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, false, false >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, false, false >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, false, false >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, false, false >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, false, false >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, false, false >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 
2, 4, 32, 1, false, false >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, false, false >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, false, false >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, false, false >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } } } else if (checkCaseBounds == true) { if (numFilterColors > 3) { if (numFilterColors % 64 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 8, 16, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 8, 16, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 8, 16, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 8, 16, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { 
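                    // Reading the template arguments by analogy with the preload kernels
                    // defined above, < 8, 16, 1, 8, 32, false > here should mean B_Y = 8,
                    // B_X = 16, filtersPerThread = 1, colorsPerThread = 8, preloadCases = 32,
                    // scale = false; conv_weight_acts_mc_mf_kepler_sw itself is defined
                    // earlier in this file.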
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 6, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 6, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, 
scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 3, false, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 3, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, 
filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 3, false, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 3, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, false, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, false, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, false, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, false, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, false, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, false, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, 
moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, false, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, false, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, false, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, false, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } } } } else if (scale == true) { if (checkCaseBounds == false) { if (numFilterColors > 3) { if (numFilterColors % 64 == 0) { if (numFiltersPerGroup % 128 == 0) { if ((THCudaTensor_nElement(images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(hidActs)*4 < TEXTURE_SIZE_MAX)) { cudaTextureObject_t texImages = THCudaTensor_getTextureObject(images); cudaTextureObject_t texHidActs = THCudaTensor_getTextureObject(hidActs); cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_tex< 8, 32, 4, 8, 16, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_tex< 8, 32, 4, 8, 16, true ><<<blocks, threads, 0>>>(texImages, texHidActs, THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); checkCudaErrors(cudaDestroyTextureObject(texImages)); checkCudaErrors(cudaDestroyTextureObject(texHidActs)); } else { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16< 8, 32, 4, 8, 16, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16< 8, 32, 4, 8, 16, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, 
moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFiltersPerGroup % 64 == 0) { if ((THCudaTensor_nElement(images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(hidActs)*4 < TEXTURE_SIZE_MAX)) { cudaTextureObject_t texImages = THCudaTensor_getTextureObject(images); cudaTextureObject_t texHidActs = THCudaTensor_getTextureObject(hidActs); cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16_tex< 8, 16, 4, 8, 16, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16_tex< 8, 16, 4, 8, 16, true ><<<blocks, threads, 0>>>(texImages, texHidActs, THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); checkCudaErrors(cudaDestroyTextureObject(texImages)); checkCudaErrors(cudaDestroyTextureObject(texHidActs)); } else { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16< 8, 16, 4, 8, 16, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16< 8, 16, 4, 8, 16, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numFiltersPerGroup % 128 == 0) { if ((THCudaTensor_nElement(images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(hidActs)*4 < TEXTURE_SIZE_MAX)) { cudaTextureObject_t texImages = THCudaTensor_getTextureObject(images); cudaTextureObject_t texHidActs = THCudaTensor_getTextureObject(hidActs); cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32_tex< 8, 32, 4, 6, 32, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32_tex< 8, 32, 4, 6, 32, true ><<<blocks, threads, 0>>>(texImages, texHidActs, THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); checkCudaErrors(cudaDestroyTextureObject(texImages)); checkCudaErrors(cudaDestroyTextureObject(texHidActs)); } else { 
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32< 8, 32, 4, 6, 32, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32< 8, 32, 4, 6, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, 
scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numFiltersPerGroup % 64 == 0) { if ((THCudaTensor_nElement(images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(hidActs)*4 < TEXTURE_SIZE_MAX)) { cudaTextureObject_t texImages = THCudaTensor_getTextureObject(images); cudaTextureObject_t texHidActs = THCudaTensor_getTextureObject(hidActs); cudaFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3_tex < 16, 16, 2, 2, 4, 32, 3, true, false >, cudaFuncCachePreferShared); conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3_tex < 16, 16, 2, 2, 4, 32, 3, true, false ><<<blocks, threads, 0>>>(texImages, texHidActs, THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); checkCudaErrors(cudaDestroyTextureObject(texImages)); checkCudaErrors(cudaDestroyTextureObject(texHidActs)); } else { 
cudaFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3 < 16, 16, 2, 2, 4, 32, 3, true, false >, cudaFuncCachePreferShared); conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3 < 16, 16, 2, 2, 4, 32, 3, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFiltersPerGroup % 48 == 0) { if ((THCudaTensor_nElement(images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(hidActs)*4 < TEXTURE_SIZE_MAX)) { cudaTextureObject_t texImages = THCudaTensor_getTextureObject(images); cudaTextureObject_t texHidActs = THCudaTensor_getTextureObject(hidActs); cudaFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3_tex < 16, 16, 2, 4, 3, 32, 3, true, false >, cudaFuncCachePreferShared); conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3_tex < 16, 16, 2, 4, 3, 32, 3, true, false ><<<blocks, threads, 0>>>(texImages, texHidActs, THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); checkCudaErrors(cudaDestroyTextureObject(texImages)); checkCudaErrors(cudaDestroyTextureObject(texHidActs)); } else { cudaFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3 < 16, 16, 2, 4, 3, 32, 3, true, false >, cudaFuncCachePreferShared); conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3 < 16, 16, 2, 4, 3, 32, 3, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, true, false >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, true, false >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, true, false >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, true, false >, 
cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, true, false >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, true, false >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, true, false >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, true, false >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, true, false >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, true, false >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } } } else if (checkCaseBounds == true) { if (numFilterColors > 3) { if (numFilterColors % 64 == 0) { if (numFiltersPerGroup % 128 == 0) { 
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 8, 16, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 8, 16, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 8, 16, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 8, 16, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 6, 32, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 6, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) 
{ cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } 
else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, true >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 3, true, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 3, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 3, true, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 3, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, true, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, true, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, true, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, 
scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, true, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, true, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, true, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, true, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, true, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, true, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, true, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(images), THCudaTensor_data(hidActs), THCudaTensor_data(targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } } } } 
getLastCudaError("weightActs: kernel execution failed"); } void convWeightActs(THCudaTensor* images, THCudaTensor* hidActs, THCudaTensor* targets, int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numGroups, int partialSum) { _weightActs(images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, partialSum, 0, 1); } void convWeightActsSt(THCudaTensor* images, THCudaTensor* hidActs, THCudaTensor* targets, int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numGroups, int partialSum, float scaleTargets, float scaleOutput) { _weightActs(images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput); } void localWeightActs(THCudaTensor* images, THCudaTensor* hidActs, THCudaTensor* targets, int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numGroups) { _weightActs(images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, 1, 0, 1); } void localWeightActsSt(THCudaTensor* images, THCudaTensor* hidActs, THCudaTensor* targets, int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets, float scaleOutput) { _weightActs(images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, 1, scaleTargets, scaleOutput); }
7227cbd4e5ffa315e73d52327c7d03d2ba469f89.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* CS 4402 Distributed and Parallel Systems Assignment 2 Question 2: N^2/t thread blocks with t threads each, where t {64, 128, 256, 512} Sarah Whelan 250778849 TO RUN: nvcc q2_swhela2.cu -o q2_swhela2 ./q2_swhela2 */ #include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> int modBy = 103; // common prime num used for modding coefficient values during generation, multiplication, and addition void genPolynomials(int *polyA, int *polyB, int size); void multPolynomialsSerial(int *polyA, int *polyB, int polySize, int *product, int productSize); __global__ void multPolynomialsParallel(int *polyA, int *polyB, int *product, int polySize, int modBy, int numBlocks); __global__ void sumProductsParallel(int prodSize, int threadsPerBlock, int *summedProduct, int *products, int numBlocks, int modBy, int polySize); void checkCUDAError(const char* msg); int main() { srand(time(NULL)); int numTerms; // get user desired input on length of polynomials printf("Specify the number of terms in the polynomial by specifying the exponent on base 2. Value must be > 5 and <= 10, e.g. enter '6' if you want 2^6 terms (AKA 64 terms) per polynomial: "); scanf("%d", &numTerms); if (numTerms > 10 || numTerms < 5) { printf("Invalid entry. The minimum number of terms is 2^6 and the maximum number of terms is 2^10. Please enter 5 < term <= 10.\n"); return 1; } // then bitshift by input value to determine actual value of numTerms numTerms = 1 << numTerms; printf("Number of terms per polynomial = %d, hence each polynomial will have degree = %d.\n\n", numTerms, numTerms-1); int threadsPerBlock; printf("Specify the number of threads per thread block (t) as one of {64, 128, 256, 512}. Keep in mind that t must be less than or equal to %d to produce a valid result: ", numTerms); scanf("%d", &threadsPerBlock); if (threadsPerBlock > numTerms) { printf("Invalid entry. Value of threads per block must be less than or equal to the number of terms specified for each polynomial.\n"); return 1; } if (!(threadsPerBlock == 64 || threadsPerBlock == 128 || threadsPerBlock == 256 || threadsPerBlock == 512)) { printf("Invalid entry. Number of threads must be one of {64, 128, 256, 512}.\n"); return 1; } // calculate number of blocks: n^2 / t int blocks = (numTerms * numTerms) / threadsPerBlock; // instantiate and allocate host memory blocks to store each polynomial of size numTerms int *host_polyA, *host_polyB; host_polyA = (int *) malloc(numTerms * sizeof(int)); host_polyB = (int *) malloc(numTerms * sizeof(int)); // generate random polynomials of size numTerms printf("\nGenerating polynomials...\n\n"); genPolynomials(host_polyA, host_polyB, numTerms); printf("polyA:\n"); for (int i = 0; i < numTerms; i++) { printf("%dx^%d ", host_polyA[i], i); if (i != numTerms-1) { printf("+ "); } } printf("\n\npolyB:\n"); for (int i = 0; i < numTerms; i++) { printf("%dx^%d ", host_polyB[i], i); if (i != numTerms-1) { printf("+ "); } } printf("\n\n"); // determine degree of product int degreeOfProduct = (numTerms - 1) * 2; // e.g. degree(polyA, polyB) = 3 then x^3 * x^3 = x^6 and degree = numTerms - 1 // allocate blocks of memory on the host for storing the product with size degreeOfProduct + 1 (serial) // and numTerms*numTerms for the intermediary parallel product, as well asthe final parallel product // two different allocations in order to verify results at the end! 
int *host_product_serial, *host_product_parallel, *host_final_product; host_product_serial = (int *) malloc((degreeOfProduct+1) * sizeof(int)); // sum of products is intrinsic host_product_parallel = (int *) malloc(numTerms * numTerms * sizeof(int)); // because of n threads in each n thread blocks host_final_product = (int *) malloc((degreeOfProduct+1) * sizeof(int)); // final product from parallel version once summed // ensure all vals in host_product_parallel are 0 (this is done within the serial version so we don't need to worry about that one) for (int i = 0; i < numTerms*numTerms; i++) { host_product_parallel[i] = 0; } // ensure all vals in host_final_product are 0 for (int i = 0; i < degreeOfProduct+1; i++) { host_final_product[i] = 0; } // initialize and allocate memory on the devices for storing dev_polyA, dev_polyB, and dev_product int *dev_polyA, *dev_polyB, *dev_product; hipMalloc( (void **) &dev_polyA, numTerms * sizeof(int)); hipMalloc( (void **) &dev_polyB, numTerms * sizeof(int)); hipMalloc( (void **) &dev_product, numTerms * numTerms * sizeof(int)); // copy polynomials: host -> device (dest, src, size, direction) hipMemcpy(dev_polyA, host_polyA, numTerms * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_polyB, host_polyB, numTerms * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_product, host_product_parallel, numTerms * numTerms * sizeof(int), hipMemcpyHostToDevice); // setup kernel params & launch dim3 dimGrid(blocks); dim3 dimBlock(threadsPerBlock); hipLaunchKernelGGL(( multPolynomialsParallel), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_polyA, dev_polyB, dev_product, numTerms, modBy, blocks); hipDeviceSynchronize(); // wait for ALL threads from all blocks to complete checkCUDAError("kernel invocation"); // copy dev_product back into host_product_parallel (dest, src, size, direction) hipMemcpy(host_product_parallel, dev_product, numTerms * numTerms * sizeof(int), hipMemcpyDeviceToHost); /* ~~~ now we need to deal with the summation of intermediary products ~~~ */ // allocate device mem for final product int *dev_final; hipMalloc( (void **) &dev_final, (degreeOfProduct+1) * sizeof(int)); // copy zero'd host_final_product to dev_final and host_product_parallel to dev_product // (dest, src, size, direction) hipMemcpy(dev_final, host_final_product, (degreeOfProduct+1) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_product, host_product_parallel, numTerms * numTerms * sizeof(int), hipMemcpyHostToDevice); // parameters are (int prodSize, int threadsPerBlock, int *summedProduct, int *products, int numBlocks, int modBy, int polySize) hipLaunchKernelGGL(( sumProductsParallel), dim3(dimGrid), dim3(dimBlock), 0, 0, degreeOfProduct+1, threadsPerBlock, dev_final, dev_product, blocks, modBy, numTerms); hipDeviceSynchronize(); // wait for ALL threads from all blocks to complete checkCUDAError("kernel invocation"); // copy summation of products back to host (dest, src, size, direction) hipMemcpy(host_final_product, dev_final, (degreeOfProduct+1) * sizeof(int), hipMemcpyDeviceToHost); // multiply polynomials in serial and write to host_product_serial for verification multPolynomialsSerial(host_polyA, host_polyB, numTerms, host_product_serial, degreeOfProduct+1); printf("Serial result:\n"); for (int i = 0; i < degreeOfProduct+1; i++) { printf("%dx^%d ", host_product_serial[i], i); if (i != degreeOfProduct) { printf("+ "); } } printf("\n\nParallel result:\n"); for (int i = 0; i < degreeOfProduct+1; i++) { printf("%dx^%d ", host_final_product[i], i); if (i != degreeOfProduct) { 
printf("+ "); } } printf("\n\n"); bool allRight = 1; for (int i = 0; i < degreeOfProduct+1; i++) { if (host_product_serial[i] == host_final_product[i]) { continue; } else { printf("Coefficients at degree %d are not equivalent: serial!=parallel (%d!=%d)\n", i, host_product_serial[i], host_final_product[i]); allRight = 0; } } if (allRight) { printf("Verification successful. The serial and parallel polynomial multiplications produced the same result!\n\n"); } else { printf("Looks like there were some discrepancies. Verification failed.\n\n"); } // free host and device memory free(host_polyA); free(host_polyB); free(host_product_serial); free(host_product_parallel); free(host_final_product); hipFree(dev_polyA); hipFree(dev_polyB); hipFree(dev_product); hipFree(dev_final); return 0; } // genPolynomials takes two polynomials and their size (number of terms per polynomial), // and generates random coefficients for each term mod p void genPolynomials(int *polyA, int *polyB, int size) { // coefficient generation using rand mod p where p = 103 for (int i = 0; i < size; i++) { polyA[i] = rand() % modBy; if (polyA[i] == 0) { // we don't want any zeros!!! polyA[i] = 1; } polyB[i] = rand() % modBy; if (polyB[i] == 0) { polyB[i] = 1; } } } // multPolynomialsSerial takes two polynomials and their size, in addition to a memory block to place // the sum of products into, as well as the size of the product polynomial void multPolynomialsSerial(int *polyA, int *polyB, int polySize, int *product, int productSize) { int degreeOfTerms; // ensure all coefficients of product are 0 for (int i = 0; i < productSize; i++) { product[i] = 0; } // calculate sum of products for (int a = 0; a < polySize; a++) { // iterate through terms in A for (int b = 0; b < polySize; b++) { // for each term in A, iterate through all terms in B // add degrees (indices) to determine which index this product belongs to in the product array block degreeOfTerms = a + b; // add product of terms to previous sum and mod by 103 product[degreeOfTerms] = (product[degreeOfTerms] + polyA[a] * polyB[b]) % modBy; } } } // multPolynomialsParallel determines the intermediary products of the polynomial multiplication problem __global__ void multPolynomialsParallel(int *polyA, int *polyB, int *product, int polySize, int modBy, int numBlocks) { int a, b, blocksPerA, blockPos; blocksPerA = numBlocks / polySize; // e.g. if numBlocks = 2048 and polySize = 512, 4 thread blocks will be assigned to one coefficient in A blockPos = blockIdx.x % blocksPerA; // i.e. is my thread block the first one assigned to A (blockPos = 0) or the 2nd (=1), 3rd (=2)? a = blockIdx.x / blocksPerA; // e.g. if blockId is 5, we need to access A[2] -> int 5/2 = 2 b = threadIdx.x + blockPos * blockDim.x; // multiple thread blocks are responsible for the elements in B, hence need to // take into account our block position to determine our B index int myIndex = blockDim.x * blockIdx.x + threadIdx.x; // where to write this thread's product product[myIndex] = (polyA[a] * polyB[b]) % modBy; } // sumProductsParallel uses prodSize threads, each thread in charge of summing common terms from the intermediary products __global__ void sumProductsParallel(int prodSize, int threadsPerBlock, int *summedProduct, int *products, int numBlocks, int modBy, int polySize) { int responsibleFor = blockIdx.x * blockDim.x + threadIdx.x; // used to check which threads are going to be active during this step int blocksPerA = numBlocks / polySize; if (responsibleFor < prodSize) { // e.g. 
1 < 7 so thread 1 is going to be in charge of summing the x^1 terms, threads >= prodSize will be inactive for remainder for (int blockNum = 0; blockNum < numBlocks; blockNum++) { // loop through blocks for (int indexInBlock = 0; indexInBlock < threadsPerBlock; indexInBlock++) { // loop through each index per block int blockPos = blockNum % blocksPerA; int degreeOfElement = (blockNum / blocksPerA) + indexInBlock + (blockDim.x * blockPos); if (indexInBlock == 0 && blockPos == 0 && degreeOfElement > responsibleFor) { return; // this thread is done summing its common terms } else if (degreeOfElement == responsibleFor) { // if this thread is responsible for the degree we just calculated int spotInProducts = blockNum * blockDim.x + indexInBlock; // get its actual index in products[] summedProduct[responsibleFor] = (summedProduct[responsibleFor] + products[spotInProducts]) % modBy; // and write that value into the final summedProduct[our degree] } } } } } void checkCUDAError(const char *msg) { hipError_t err = hipGetLastError(); if(hipSuccess != err) { fprintf(stderr, "CUDA error: %s: %s.\n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } }
7227cbd4e5ffa315e73d52327c7d03d2ba469f89.cu
/* CS 4402 Distributed and Parallel Systems Assignment 2 Question 2: N^2/t thread blocks with t threads each, where t ∈ {64, 128, 256, 512} Sarah Whelan 250778849 TO RUN: nvcc q2_swhela2.cu -o q2_swhela2 ./q2_swhela2 */ #include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> int modBy = 103; // common prime num used for modding coefficient values during generation, multiplication, and addition void genPolynomials(int *polyA, int *polyB, int size); void multPolynomialsSerial(int *polyA, int *polyB, int polySize, int *product, int productSize); __global__ void multPolynomialsParallel(int *polyA, int *polyB, int *product, int polySize, int modBy, int numBlocks); __global__ void sumProductsParallel(int prodSize, int threadsPerBlock, int *summedProduct, int *products, int numBlocks, int modBy, int polySize); void checkCUDAError(const char* msg); int main() { srand(time(NULL)); int numTerms; // get user desired input on length of polynomials printf("Specify the number of terms in the polynomial by specifying the exponent on base 2. Value must be > 5 and <= 10, e.g. enter '6' if you want 2^6 terms (AKA 64 terms) per polynomial: "); scanf("%d", &numTerms); if (numTerms > 10 || numTerms < 5) { printf("Invalid entry. The minimum number of terms is 2^6 and the maximum number of terms is 2^10. Please enter 5 < term <= 10.\n"); return 1; } // then bitshift by input value to determine actual value of numTerms numTerms = 1 << numTerms; printf("Number of terms per polynomial = %d, hence each polynomial will have degree = %d.\n\n", numTerms, numTerms-1); int threadsPerBlock; printf("Specify the number of threads per thread block (t) as one of {64, 128, 256, 512}. Keep in mind that t must be less than or equal to %d to produce a valid result: ", numTerms); scanf("%d", &threadsPerBlock); if (threadsPerBlock > numTerms) { printf("Invalid entry. Value of threads per block must be less than or equal to the number of terms specified for each polynomial.\n"); return 1; } if (!(threadsPerBlock == 64 || threadsPerBlock == 128 || threadsPerBlock == 256 || threadsPerBlock == 512)) { printf("Invalid entry. Number of threads must be one of {64, 128, 256, 512}.\n"); return 1; } // calculate number of blocks: n^2 / t int blocks = (numTerms * numTerms) / threadsPerBlock; // instantiate and allocate host memory blocks to store each polynomial of size numTerms int *host_polyA, *host_polyB; host_polyA = (int *) malloc(numTerms * sizeof(int)); host_polyB = (int *) malloc(numTerms * sizeof(int)); // generate random polynomials of size numTerms printf("\nGenerating polynomials...\n\n"); genPolynomials(host_polyA, host_polyB, numTerms); printf("polyA:\n"); for (int i = 0; i < numTerms; i++) { printf("%dx^%d ", host_polyA[i], i); if (i != numTerms-1) { printf("+ "); } } printf("\n\npolyB:\n"); for (int i = 0; i < numTerms; i++) { printf("%dx^%d ", host_polyB[i], i); if (i != numTerms-1) { printf("+ "); } } printf("\n\n"); // determine degree of product int degreeOfProduct = (numTerms - 1) * 2; // e.g. degree(polyA, polyB) = 3 then x^3 * x^3 = x^6 and degree = numTerms - 1 // allocate blocks of memory on the host for storing the product with size degreeOfProduct + 1 (serial) // and numTerms*numTerms for the intermediary parallel product, as well asthe final parallel product // two different allocations in order to verify results at the end! 
int *host_product_serial, *host_product_parallel, *host_final_product; host_product_serial = (int *) malloc((degreeOfProduct+1) * sizeof(int)); // sum of products is intrinsic host_product_parallel = (int *) malloc(numTerms * numTerms * sizeof(int)); // because of n threads in each n thread blocks host_final_product = (int *) malloc((degreeOfProduct+1) * sizeof(int)); // final product from parallel version once summed // ensure all vals in host_product_parallel are 0 (this is done within the serial version so we don't need to worry about that one) for (int i = 0; i < numTerms*numTerms; i++) { host_product_parallel[i] = 0; } // ensure all vals in host_final_product are 0 for (int i = 0; i < degreeOfProduct+1; i++) { host_final_product[i] = 0; } // initialize and allocate memory on the devices for storing dev_polyA, dev_polyB, and dev_product int *dev_polyA, *dev_polyB, *dev_product; cudaMalloc( (void **) &dev_polyA, numTerms * sizeof(int)); cudaMalloc( (void **) &dev_polyB, numTerms * sizeof(int)); cudaMalloc( (void **) &dev_product, numTerms * numTerms * sizeof(int)); // copy polynomials: host -> device (dest, src, size, direction) cudaMemcpy(dev_polyA, host_polyA, numTerms * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_polyB, host_polyB, numTerms * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_product, host_product_parallel, numTerms * numTerms * sizeof(int), cudaMemcpyHostToDevice); // setup kernel params & launch dim3 dimGrid(blocks); dim3 dimBlock(threadsPerBlock); multPolynomialsParallel<<<dimGrid, dimBlock>>>(dev_polyA, dev_polyB, dev_product, numTerms, modBy, blocks); cudaThreadSynchronize(); // wait for ALL threads from all blocks to complete checkCUDAError("kernel invocation"); // copy dev_product back into host_product_parallel (dest, src, size, direction) cudaMemcpy(host_product_parallel, dev_product, numTerms * numTerms * sizeof(int), cudaMemcpyDeviceToHost); /* ~~~ now we need to deal with the summation of intermediary products ~~~ */ // allocate device mem for final product int *dev_final; cudaMalloc( (void **) &dev_final, (degreeOfProduct+1) * sizeof(int)); // copy zero'd host_final_product to dev_final and host_product_parallel to dev_product // (dest, src, size, direction) cudaMemcpy(dev_final, host_final_product, (degreeOfProduct+1) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_product, host_product_parallel, numTerms * numTerms * sizeof(int), cudaMemcpyHostToDevice); // parameters are (int prodSize, int threadsPerBlock, int *summedProduct, int *products, int numBlocks, int modBy, int polySize) sumProductsParallel<<<dimGrid, dimBlock>>>(degreeOfProduct+1, threadsPerBlock, dev_final, dev_product, blocks, modBy, numTerms); cudaThreadSynchronize(); // wait for ALL threads from all blocks to complete checkCUDAError("kernel invocation"); // copy summation of products back to host (dest, src, size, direction) cudaMemcpy(host_final_product, dev_final, (degreeOfProduct+1) * sizeof(int), cudaMemcpyDeviceToHost); // multiply polynomials in serial and write to host_product_serial for verification multPolynomialsSerial(host_polyA, host_polyB, numTerms, host_product_serial, degreeOfProduct+1); printf("Serial result:\n"); for (int i = 0; i < degreeOfProduct+1; i++) { printf("%dx^%d ", host_product_serial[i], i); if (i != degreeOfProduct) { printf("+ "); } } printf("\n\nParallel result:\n"); for (int i = 0; i < degreeOfProduct+1; i++) { printf("%dx^%d ", host_final_product[i], i); if (i != degreeOfProduct) { printf("+ "); } } printf("\n\n"); bool allRight = 1; 
for (int i = 0; i < degreeOfProduct+1; i++) { if (host_product_serial[i] == host_final_product[i]) { continue; } else { printf("Coefficients at degree %d are not equivalent: serial!=parallel (%d!=%d)\n", i, host_product_serial[i], host_final_product[i]); allRight = 0; } } if (allRight) { printf("Verification successful. The serial and parallel polynomial multiplications produced the same result!\n\n"); } else { printf("Looks like there were some discrepancies. Verification failed.\n\n"); } // free host and device memory free(host_polyA); free(host_polyB); free(host_product_serial); free(host_product_parallel); free(host_final_product); cudaFree(dev_polyA); cudaFree(dev_polyB); cudaFree(dev_product); cudaFree(dev_final); return 0; } // genPolynomials takes two polynomials and their size (number of terms per polynomial), // and generates random coefficients for each term mod p void genPolynomials(int *polyA, int *polyB, int size) { // coefficient generation using rand mod p where p = 103 for (int i = 0; i < size; i++) { polyA[i] = rand() % modBy; if (polyA[i] == 0) { // we don't want any zeros!!! polyA[i] = 1; } polyB[i] = rand() % modBy; if (polyB[i] == 0) { polyB[i] = 1; } } } // multPolynomialsSerial takes two polynomials and their size, in addition to a memory block to place // the sum of products into, as well as the size of the product polynomial void multPolynomialsSerial(int *polyA, int *polyB, int polySize, int *product, int productSize) { int degreeOfTerms; // ensure all coefficients of product are 0 for (int i = 0; i < productSize; i++) { product[i] = 0; } // calculate sum of products for (int a = 0; a < polySize; a++) { // iterate through terms in A for (int b = 0; b < polySize; b++) { // for each term in A, iterate through all terms in B // add degrees (indices) to determine which index this product belongs to in the product array block degreeOfTerms = a + b; // add product of terms to previous sum and mod by 103 product[degreeOfTerms] = (product[degreeOfTerms] + polyA[a] * polyB[b]) % modBy; } } } // multPolynomialsParallel determines the intermediary products of the polynomial multiplication problem __global__ void multPolynomialsParallel(int *polyA, int *polyB, int *product, int polySize, int modBy, int numBlocks) { int a, b, blocksPerA, blockPos; blocksPerA = numBlocks / polySize; // e.g. if numBlocks = 2048 and polySize = 512, 4 thread blocks will be assigned to one coefficient in A blockPos = blockIdx.x % blocksPerA; // i.e. is my thread block the first one assigned to A (blockPos = 0) or the 2nd (=1), 3rd (=2)? a = blockIdx.x / blocksPerA; // e.g. if blockId is 5, we need to access A[2] -> int 5/2 = 2 b = threadIdx.x + blockPos * blockDim.x; // multiple thread blocks are responsible for the elements in B, hence need to // take into account our block position to determine our B index int myIndex = blockDim.x * blockIdx.x + threadIdx.x; // where to write this thread's product product[myIndex] = (polyA[a] * polyB[b]) % modBy; } // sumProductsParallel uses prodSize threads, each thread in charge of summing common terms from the intermediary products __global__ void sumProductsParallel(int prodSize, int threadsPerBlock, int *summedProduct, int *products, int numBlocks, int modBy, int polySize) { int responsibleFor = blockIdx.x * blockDim.x + threadIdx.x; // used to check which threads are going to be active during this step int blocksPerA = numBlocks / polySize; if (responsibleFor < prodSize) { // e.g. 
1 < 7 so thread 1 is going to be in charge of summing the x^1 terms, threads >= prodSize will be inactive for remainder for (int blockNum = 0; blockNum < numBlocks; blockNum++) { // loop through blocks for (int indexInBlock = 0; indexInBlock < threadsPerBlock; indexInBlock++) { // loop through each index per block int blockPos = blockNum % blocksPerA; int degreeOfElement = (blockNum / blocksPerA) + indexInBlock + (blockDim.x * blockPos); if (indexInBlock == 0 && blockPos == 0 && degreeOfElement > responsibleFor) { return; // this thread is done summing its common terms } else if (degreeOfElement == responsibleFor) { // if this thread is responsible for the degree we just calculated int spotInProducts = blockNum * blockDim.x + indexInBlock; // get its actual index in products[] summedProduct[responsibleFor] = (summedProduct[responsibleFor] + products[spotInProducts]) % modBy; // and write that value into the final summedProduct[our degree] } } } } } void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if(cudaSuccess != err) { fprintf(stderr, "CUDA error: %s: %s.\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } }
a6613b1120d39cd0cc6965b5affe2971a786d3a4.hip
// !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <chrono>
#include <hip/hip_runtime.h>
#include "reference.h"

__host__ __device__
inline float hd (const float2 ap, const float2 bp)
{
  return (ap.x - bp.x) * (ap.x - bp.x) + (ap.y - bp.y) * (ap.y - bp.y);
}

__device__ __forceinline__
void atomic_max(float *address, float val)
{
  unsigned int ret = __float_as_uint(*address);
  while(val > __uint_as_float(ret))
  {
    unsigned int old = ret;
    if((ret = atomicCAS((unsigned int *)address, old, __float_as_uint(val))) == old)
      break;
  }
}

__global__ void computeDistance(const float2* __restrict__ Apoints,
                                const float2* __restrict__ Bpoints,
                                float* __restrict__ distance,
                                const int numA, const int numB)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= numA) return;

  float d = std::numeric_limits<float>::max();
  float2 p = Apoints[i];
  for (int j = 0; j < numB; j++)
  {
    float t = hd(p, Bpoints[j]);
    d = ::min(t, d);
  }
  atomic_max(distance, d);
}

int main(int argc, char* argv[])
{
  if (argc != 4) {
    printf("Usage: %s <number of points in space A>", argv[0]);
    printf(" <number of points in space B> <repeat>\n");
    return 1;
  }
  const int num_Apoints = atoi(argv[1]);
  const int num_Bpoints = atoi(argv[2]);
  const int repeat = atoi(argv[3]);

  const size_t num_Apoints_bytes = sizeof(float2) * num_Apoints;
  const size_t num_Bpoints_bytes = sizeof(float2) * num_Bpoints;

  float2 *h_Apoints = (float2*) malloc (num_Apoints_bytes);
  float2 *h_Bpoints = (float2*) malloc (num_Bpoints_bytes);

  srand(123);
  for (int i = 0; i < num_Apoints; i++) {
    h_Apoints[i].x = (float)rand() / (float)RAND_MAX;
    h_Apoints[i].y = (float)rand() / (float)RAND_MAX;
  }

  for (int i = 0; i < num_Bpoints; i++) {
    h_Bpoints[i].x = (float)rand() / (float)RAND_MAX;
    h_Bpoints[i].y = (float)rand() / (float)RAND_MAX;
  }

  float2 *d_Apoints, *d_Bpoints;
  float *d_distance;
  hipMalloc((void**)&d_Apoints, num_Apoints_bytes);
  hipMalloc((void**)&d_Bpoints, num_Bpoints_bytes);
  hipMalloc((void**)&d_distance, 2 * sizeof(float));

  hipMemcpy(d_Apoints, h_Apoints, num_Apoints_bytes, hipMemcpyHostToDevice);
  hipMemcpy(d_Bpoints, h_Bpoints, num_Bpoints_bytes, hipMemcpyHostToDevice);

  dim3 gridsA ((num_Apoints + 255) / 256);
  dim3 gridsB ((num_Bpoints + 255) / 256);
  dim3 blocks (256);

  float h_distance[2] = {-1.f, -1.f};

  double time = 0.0;

  for (int i = 0; i < repeat; i++) {
    hipMemcpy(d_distance, h_distance, 2 * sizeof(float), hipMemcpyHostToDevice);

    hipDeviceSynchronize();
    auto start = std::chrono::steady_clock::now();

    hipLaunchKernelGGL(computeDistance, gridsA, blocks, 0, 0, d_Apoints, d_Bpoints, d_distance, num_Apoints, num_Bpoints);
    hipLaunchKernelGGL(computeDistance, gridsB, blocks, 0, 0, d_Bpoints, d_Apoints, d_distance+1, num_Bpoints, num_Apoints);

    hipDeviceSynchronize();
    auto end = std::chrono::steady_clock::now();
    time += std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
  }

  printf("Average execution time of kernels: %f (ms)\n", (time * 1e-6f) / repeat);

  hipMemcpy(h_distance, d_distance, 2 * sizeof(float), hipMemcpyDeviceToHost);

  printf("Verifying the result may take a while..\n");
  float r_distance = hausdorff_distance(h_Apoints, h_Bpoints, num_Apoints, num_Bpoints);
  float t_distance = ::max(h_distance[0], h_distance[1]);

  bool error = (fabsf(t_distance - r_distance)) > 1e-3f;
  printf("%s\n", error ? "FAIL" : "PASS");

  free(h_Apoints);
  free(h_Bpoints);
  hipFree(d_distance);
  hipFree(d_Apoints);
  hipFree(d_Bpoints);
  return 0;
}
a6613b1120d39cd0cc6965b5affe2971a786d3a4.cu
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <chrono>
#include <hip/hip_runtime.h>
#include "reference.h"

__host__ __device__
inline float hd (const float2 ap, const float2 bp)
{
  return (ap.x - bp.x) * (ap.x - bp.x) + (ap.y - bp.y) * (ap.y - bp.y);
}

__device__ __forceinline__
void atomic_max(float *address, float val)
{
  unsigned int ret = __float_as_uint(*address);
  while(val > __uint_as_float(ret))
  {
    unsigned int old = ret;
    if((ret = atomicCAS((unsigned int *)address, old, __float_as_uint(val))) == old)
      break;
  }
}

__global__ void computeDistance(const float2* __restrict__ Apoints,
                                const float2* __restrict__ Bpoints,
                                float* __restrict__ distance,
                                const int numA, const int numB)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= numA) return;

  float d = std::numeric_limits<float>::max();
  float2 p = Apoints[i];
  for (int j = 0; j < numB; j++)
  {
    float t = hd(p, Bpoints[j]);
    d = std::min(t, d);
  }
  atomic_max(distance, d);
}

int main(int argc, char* argv[])
{
  if (argc != 4) {
    printf("Usage: %s <number of points in space A>", argv[0]);
    printf(" <number of points in space B> <repeat>\n");
    return 1;
  }
  const int num_Apoints = atoi(argv[1]);
  const int num_Bpoints = atoi(argv[2]);
  const int repeat = atoi(argv[3]);

  const size_t num_Apoints_bytes = sizeof(float2) * num_Apoints;
  const size_t num_Bpoints_bytes = sizeof(float2) * num_Bpoints;

  float2 *h_Apoints = (float2*) malloc (num_Apoints_bytes);
  float2 *h_Bpoints = (float2*) malloc (num_Bpoints_bytes);

  srand(123);
  for (int i = 0; i < num_Apoints; i++) {
    h_Apoints[i].x = (float)rand() / (float)RAND_MAX;
    h_Apoints[i].y = (float)rand() / (float)RAND_MAX;
  }

  for (int i = 0; i < num_Bpoints; i++) {
    h_Bpoints[i].x = (float)rand() / (float)RAND_MAX;
    h_Bpoints[i].y = (float)rand() / (float)RAND_MAX;
  }

  float2 *d_Apoints, *d_Bpoints;
  float *d_distance;
  hipMalloc((void**)&d_Apoints, num_Apoints_bytes);
  hipMalloc((void**)&d_Bpoints, num_Bpoints_bytes);
  hipMalloc((void**)&d_distance, 2 * sizeof(float));

  hipMemcpy(d_Apoints, h_Apoints, num_Apoints_bytes, hipMemcpyHostToDevice);
  hipMemcpy(d_Bpoints, h_Bpoints, num_Bpoints_bytes, hipMemcpyHostToDevice);

  dim3 gridsA ((num_Apoints + 255) / 256);
  dim3 gridsB ((num_Bpoints + 255) / 256);
  dim3 blocks (256);

  float h_distance[2] = {-1.f, -1.f};

  double time = 0.0;

  for (int i = 0; i < repeat; i++) {
    hipMemcpy(d_distance, h_distance, 2 * sizeof(float), hipMemcpyHostToDevice);

    hipDeviceSynchronize();
    auto start = std::chrono::steady_clock::now();

    hipLaunchKernelGGL(computeDistance, gridsA, blocks, 0, 0, d_Apoints, d_Bpoints, d_distance, num_Apoints, num_Bpoints);
    hipLaunchKernelGGL(computeDistance, gridsB, blocks, 0, 0, d_Bpoints, d_Apoints, d_distance+1, num_Bpoints, num_Apoints);

    hipDeviceSynchronize();
    auto end = std::chrono::steady_clock::now();
    time += std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
  }

  printf("Average execution time of kernels: %f (ms)\n", (time * 1e-6f) / repeat);

  hipMemcpy(h_distance, d_distance, 2 * sizeof(float), hipMemcpyDeviceToHost);

  printf("Verifying the result may take a while..\n");
  float r_distance = hausdorff_distance(h_Apoints, h_Bpoints, num_Apoints, num_Bpoints);
  float t_distance = std::max(h_distance[0], h_distance[1]);

  bool error = (fabsf(t_distance - r_distance)) > 1e-3f;
  printf("%s\n", error ? "FAIL" : "PASS");

  free(h_Apoints);
  free(h_Bpoints);
  hipFree(d_distance);
  hipFree(d_Apoints);
  hipFree(d_Bpoints);
  return 0;
}
f27614b9783aa1f0ef3238dc5452af2f78809752.hip
// !!! This is a file automatically generated by hipify!!! /* * knnCuda.cu * * Created on: Jul 15, 2011 * Author: amatf */ #include <algorithm> #include <iostream> #include "book.h" #include "knnCuda.h" #if defined(_WIN32) || defined(_WIN64) #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #endif __constant__ float scaleCUDA[dimsImage]; int selectCUDAdeviceMaxMemory_() { hipDeviceProp_t prop; int count; int device = -1; unsigned long long int totalMemMax = 0; HANDLE_ERROR(hipGetDeviceCount(&count)); for (int i = 0; i < count; i++) { HANDLE_ERROR(hipGetDeviceProperties(&prop, i)); if (prop.totalGlobalMem > totalMemMax) { device = i; totalMemMax = (unsigned long)prop.totalGlobalMem; } } if (device < -1) printf("ERROR: at selectCUDAdeviceMaxMemory(): CUDA device not selected\n"); return device; } __device__ inline void findMaxPosition(float *distArray, float *minDist, int *pos, int KNN) { (*minDist) = distArray[0]; (*pos) = 0; for (int ii = 1; ii < KNN; ii++) { if ((*minDist) < distArray[ii]) { (*minDist) = distArray[ii]; (*pos) = ii; } } } //=========================================================================================== __global__ void __launch_bounds__(MAX_THREADS) knnKernelNoConstantMemory(int *indCUDA, float *distCUDA, float *queryCUDA, float *anchorCUDA, int ref_nb, long long int query_nb, int KNN) { // map from threadIdx/BlockIdx to pixel position int tid = threadIdx.x + blockIdx.x * blockDim.x; // int offset = blockDim.x * gridDim.x; if (KNN > maxKNN) return; // code is not ready for this if (tid >= query_nb) return; // int kMinusOne=maxKNN-1; float minDist[maxKNN]; // to mantain distance for each index: since K is very // small instead of a priority queue we keep a sorted // array int indAux[maxKNN]; float queryAux[dimsImage]; float minDistThr; float dist, distAux; int jj2, minPos; jj2 = tid; // global memory: organized as x_1,x_2,x_3,....,y_1,y_2,...,z_1,... to have // coalescent access queryAux[0] = queryCUDA[jj2]; jj2 += query_nb; queryAux[1] = queryCUDA[jj2]; jj2 += query_nb; queryAux[2] = queryCUDA[jj2]; int refIdx; for (int jj = 0; jj < KNN; jj++) minDist[jj] = 1e32; // equivalent to infinity. 
Thus, we know this element // has not been assigned minDistThr = 1e32; minPos = 0; for (int ii = 0; ii < ref_nb; ii++) { //__syncthreads();//to access constant memory coherently (this was effective // in CUDA 3.2) refIdx = ii; distAux = (queryAux[0] - anchorCUDA[refIdx]) * scaleCUDA[0]; dist = distAux * distAux; refIdx += ref_nb; if (dist > minDistThr) continue; distAux = (queryAux[1] - anchorCUDA[refIdx]) * scaleCUDA[1]; dist += distAux * distAux; refIdx += ref_nb; if (dist > minDistThr) continue; distAux = (queryAux[2] - anchorCUDA[refIdx]) * scaleCUDA[2]; dist += distAux * distAux; if (dist > minDistThr) continue; // insert element" minimize memory exchanges minDist[minPos] = dist; indAux[minPos] = ii; findMaxPosition(minDist, &minDistThr, &minPos, KNN); } __syncthreads(); // I need this to have coalescent memory access to inCUDA: // speeds up the code by x4 // copy indexes to global memory jj2 = tid; for (int jj = 0; jj < KNN; jj++) { // indCUDA[jj+jj2]=indAux[jj]; indCUDA[jj2] = indAux[jj]; jj2 += query_nb; } // copy distance if requested by user if (distCUDA != NULL) { jj2 = tid; for (int jj = 0; jj < KNN; jj++) { // indCUDA[jj+jj2]=indAux[jj]; distCUDA[jj2] = minDist[jj]; jj2 += query_nb; } } // update pointer for next query_point to check // tid+=offset; } //============================================================================================================= int knnCUDA_(int *ind, float *dist, float *query, float *ref, long long int query_nb, int ref_nb, int KNN, float *scale, int devCUDA) { // Variables and parameters // float* ref; // Pointer to reference point array: order is // cache friednly with the GPU // float* query; // Pointer to query point array: order is // x1,y1,z1,x2,y2,z2... to be cache friendly // int* ind; // Pointer to index array: size // query_nb*maxKNN. Again, order is GPU cache friendly. // float* dist; // Pointer to distance^2 array: size // query_nb*maxKNN. Again, order is GPU cache friendly. If pointer is null, // scaled euclidean distance to each nearest neighbor is not returned // int ref_nb // Reference point number // int query_nb // Query point number // float scale[dimsImage] // if (dimsImage != 3) { printf( "ERROR: at knnCUDA: code is not ready for dimsImage other than 3\n"); // TODO: change this to any dimensionality return 2; } if (ref_nb <= 0) // nothing to do. There are no possible assignments { if (dist != NULL) { for (long long int ii = 0; ii < query_nb * KNN; ii++) dist[ii] = 1e32f; // no assignments } return 0; } // CUDA variables int *indCUDA; float *queryCUDA; float *anchorCUDA; float *distCUDA = NULL; // set CUDA device HANDLE_ERROR(hipSetDevice(devCUDA)); // allocate memory on the GPU for the output: it will only be done once in the // whole program HANDLE_ERROR(hipMalloc( (void **)&indCUDA, query_nb * KNN * sizeof(int))); // should it be a texture memory?NO. 
It // does not fit in Cuda2Darray but it // fits in linear 1Dtexture, although it // does not seems to bring benefits HANDLE_ERROR( hipMalloc((void **)&queryCUDA, query_nb * dimsImage * sizeof(float))); HANDLE_ERROR( hipMalloc((void **)&anchorCUDA, ref_nb * dimsImage * sizeof(float))); if (dist != NULL) HANDLE_ERROR( hipMalloc((void **)&distCUDA, query_nb * KNN * sizeof(float))); // Copy image data to array HANDLE_ERROR(hipMemcpy(queryCUDA, query, dimsImage * query_nb * sizeof(float), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(anchorCUDA, ref, dimsImage * ref_nb * sizeof(float), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpyToSymbol( scaleCUDA, scale, dimsImage * sizeof(float))); // constant memory // prepare to launch kernel int numThreads = min(MAX_THREADS, (int)query_nb); int numGrids = min( MAX_BLOCKS, (int)(query_nb + numThreads - 1) / numThreads); // TODO: play with these numbers to optimize hipLaunchKernelGGL(( knnKernelNoConstantMemory), dim3(numGrids), dim3(numThreads), 0, 0, indCUDA, distCUDA, queryCUDA, anchorCUDA, ref_nb, query_nb, KNN); HANDLE_ERROR_KERNEL; // copy results back HANDLE_ERROR(hipMemcpy(ind, indCUDA, query_nb * KNN * sizeof(int), hipMemcpyDeviceToHost)); // retrieve indexes: // memcopy is synchronous // unless stated otherwise if (distCUDA != NULL) HANDLE_ERROR(hipMemcpy(dist, distCUDA, query_nb * KNN * sizeof(float), hipMemcpyDeviceToHost)); // free memory HANDLE_ERROR(hipFree(indCUDA)); HANDLE_ERROR(hipFree(queryCUDA)); HANDLE_ERROR(hipFree(anchorCUDA)); if (distCUDA != NULL) HANDLE_ERROR(hipFree(distCUDA)); return 0; } //=================================================================================================== int allocateGPUMemoryForKnnCUDA_(float *queryTemp, float **queryCUDA, int **indCUDA, long long int query_nb, float *scale, int KNN) { HANDLE_ERROR(hipMalloc((void **)&(*indCUDA), query_nb * KNN * sizeof(int))); HANDLE_ERROR( hipMalloc((void **)&(*queryCUDA), query_nb * dimsImage * sizeof(float))); // Copy image data to array HANDLE_ERROR(hipMemcpy((*queryCUDA), queryTemp, dimsImage * query_nb * sizeof(float), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpyToSymbol( scaleCUDA, scale, dimsImage * sizeof(float))); // constant memory return 0; } void setDeviceCUDA_(int devCUDA) { // WE ASSUME qeuryCUDA AND indCUDA HAVE BEEN ALLOCATED ALREADY AND MEMORY // TRANSFERRED TO THE GPU HANDLE_ERROR(hipSetDevice(devCUDA)); } //==================================================================================================== void deallocateGPUMemoryForKnnCUDA_(float **queryCUDA, int **indCUDA) { HANDLE_ERROR(hipFree(*indCUDA)); (*indCUDA) = NULL; HANDLE_ERROR(hipFree(*queryCUDA)); (*queryCUDA) = NULL; } //============================================================== void uploadScaleCUDA_(float *scale) { HANDLE_ERROR(hipMemcpyToSymbol( scaleCUDA, scale, dimsImage * sizeof(float))); // constant memory }
f27614b9783aa1f0ef3238dc5452af2f78809752.cu
/* * knnCuda.cu * * Created on: Jul 15, 2011 * Author: amatf */ #include <algorithm> #include <iostream> #include "book.h" #include "knnCuda.h" #if defined(_WIN32) || defined(_WIN64) #include "cuda.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #endif __constant__ float scaleCUDA[dimsImage]; int selectCUDAdeviceMaxMemory_() { cudaDeviceProp prop; int count; int device = -1; unsigned long long int totalMemMax = 0; HANDLE_ERROR(cudaGetDeviceCount(&count)); for (int i = 0; i < count; i++) { HANDLE_ERROR(cudaGetDeviceProperties(&prop, i)); if (prop.totalGlobalMem > totalMemMax) { device = i; totalMemMax = (unsigned long)prop.totalGlobalMem; } } if (device < -1) printf("ERROR: at selectCUDAdeviceMaxMemory(): CUDA device not selected\n"); return device; } __device__ inline void findMaxPosition(float *distArray, float *minDist, int *pos, int KNN) { (*minDist) = distArray[0]; (*pos) = 0; for (int ii = 1; ii < KNN; ii++) { if ((*minDist) < distArray[ii]) { (*minDist) = distArray[ii]; (*pos) = ii; } } } //=========================================================================================== __global__ void __launch_bounds__(MAX_THREADS) knnKernelNoConstantMemory(int *indCUDA, float *distCUDA, float *queryCUDA, float *anchorCUDA, int ref_nb, long long int query_nb, int KNN) { // map from threadIdx/BlockIdx to pixel position int tid = threadIdx.x + blockIdx.x * blockDim.x; // int offset = blockDim.x * gridDim.x; if (KNN > maxKNN) return; // code is not ready for this if (tid >= query_nb) return; // int kMinusOne=maxKNN-1; float minDist[maxKNN]; // to mantain distance for each index: since K is very // small instead of a priority queue we keep a sorted // array int indAux[maxKNN]; float queryAux[dimsImage]; float minDistThr; float dist, distAux; int jj2, minPos; jj2 = tid; // global memory: organized as x_1,x_2,x_3,....,y_1,y_2,...,z_1,... to have // coalescent access queryAux[0] = queryCUDA[jj2]; jj2 += query_nb; queryAux[1] = queryCUDA[jj2]; jj2 += query_nb; queryAux[2] = queryCUDA[jj2]; int refIdx; for (int jj = 0; jj < KNN; jj++) minDist[jj] = 1e32; // equivalent to infinity. 
Thus, we know this element // has not been assigned minDistThr = 1e32; minPos = 0; for (int ii = 0; ii < ref_nb; ii++) { //__syncthreads();//to access constant memory coherently (this was effective // in CUDA 3.2) refIdx = ii; distAux = (queryAux[0] - anchorCUDA[refIdx]) * scaleCUDA[0]; dist = distAux * distAux; refIdx += ref_nb; if (dist > minDistThr) continue; distAux = (queryAux[1] - anchorCUDA[refIdx]) * scaleCUDA[1]; dist += distAux * distAux; refIdx += ref_nb; if (dist > minDistThr) continue; distAux = (queryAux[2] - anchorCUDA[refIdx]) * scaleCUDA[2]; dist += distAux * distAux; if (dist > minDistThr) continue; // insert element" minimize memory exchanges minDist[minPos] = dist; indAux[minPos] = ii; findMaxPosition(minDist, &minDistThr, &minPos, KNN); } __syncthreads(); // I need this to have coalescent memory access to inCUDA: // speeds up the code by x4 // copy indexes to global memory jj2 = tid; for (int jj = 0; jj < KNN; jj++) { // indCUDA[jj+jj2]=indAux[jj]; indCUDA[jj2] = indAux[jj]; jj2 += query_nb; } // copy distance if requested by user if (distCUDA != NULL) { jj2 = tid; for (int jj = 0; jj < KNN; jj++) { // indCUDA[jj+jj2]=indAux[jj]; distCUDA[jj2] = minDist[jj]; jj2 += query_nb; } } // update pointer for next query_point to check // tid+=offset; } //============================================================================================================= int knnCUDA_(int *ind, float *dist, float *query, float *ref, long long int query_nb, int ref_nb, int KNN, float *scale, int devCUDA) { // Variables and parameters // float* ref; // Pointer to reference point array: order is // cache friednly with the GPU // float* query; // Pointer to query point array: order is // x1,y1,z1,x2,y2,z2... to be cache friendly // int* ind; // Pointer to index array: size // query_nb*maxKNN. Again, order is GPU cache friendly. // float* dist; // Pointer to distance^2 array: size // query_nb*maxKNN. Again, order is GPU cache friendly. If pointer is null, // scaled euclidean distance to each nearest neighbor is not returned // int ref_nb // Reference point number // int query_nb // Query point number // float scale[dimsImage] // if (dimsImage != 3) { printf( "ERROR: at knnCUDA: code is not ready for dimsImage other than 3\n"); // TODO: change this to any dimensionality return 2; } if (ref_nb <= 0) // nothing to do. There are no possible assignments { if (dist != NULL) { for (long long int ii = 0; ii < query_nb * KNN; ii++) dist[ii] = 1e32f; // no assignments } return 0; } // CUDA variables int *indCUDA; float *queryCUDA; float *anchorCUDA; float *distCUDA = NULL; // set CUDA device HANDLE_ERROR(cudaSetDevice(devCUDA)); // allocate memory on the GPU for the output: it will only be done once in the // whole program HANDLE_ERROR(cudaMalloc( (void **)&indCUDA, query_nb * KNN * sizeof(int))); // should it be a texture memory?NO. 
It // does not fit in Cuda2Darray but it // fits in linear 1Dtexture, although it // does not seems to bring benefits HANDLE_ERROR( cudaMalloc((void **)&queryCUDA, query_nb * dimsImage * sizeof(float))); HANDLE_ERROR( cudaMalloc((void **)&anchorCUDA, ref_nb * dimsImage * sizeof(float))); if (dist != NULL) HANDLE_ERROR( cudaMalloc((void **)&distCUDA, query_nb * KNN * sizeof(float))); // Copy image data to array HANDLE_ERROR(cudaMemcpy(queryCUDA, query, dimsImage * query_nb * sizeof(float), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(anchorCUDA, ref, dimsImage * ref_nb * sizeof(float), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpyToSymbol( scaleCUDA, scale, dimsImage * sizeof(float))); // constant memory // prepare to launch kernel int numThreads = min(MAX_THREADS, (int)query_nb); int numGrids = min( MAX_BLOCKS, (int)(query_nb + numThreads - 1) / numThreads); // TODO: play with these numbers to optimize knnKernelNoConstantMemory<<<numGrids, numThreads>>>( indCUDA, distCUDA, queryCUDA, anchorCUDA, ref_nb, query_nb, KNN); HANDLE_ERROR_KERNEL; // copy results back HANDLE_ERROR(cudaMemcpy(ind, indCUDA, query_nb * KNN * sizeof(int), cudaMemcpyDeviceToHost)); // retrieve indexes: // memcopy is synchronous // unless stated otherwise if (distCUDA != NULL) HANDLE_ERROR(cudaMemcpy(dist, distCUDA, query_nb * KNN * sizeof(float), cudaMemcpyDeviceToHost)); // free memory HANDLE_ERROR(cudaFree(indCUDA)); HANDLE_ERROR(cudaFree(queryCUDA)); HANDLE_ERROR(cudaFree(anchorCUDA)); if (distCUDA != NULL) HANDLE_ERROR(cudaFree(distCUDA)); return 0; } //=================================================================================================== int allocateGPUMemoryForKnnCUDA_(float *queryTemp, float **queryCUDA, int **indCUDA, long long int query_nb, float *scale, int KNN) { HANDLE_ERROR(cudaMalloc((void **)&(*indCUDA), query_nb * KNN * sizeof(int))); HANDLE_ERROR( cudaMalloc((void **)&(*queryCUDA), query_nb * dimsImage * sizeof(float))); // Copy image data to array HANDLE_ERROR(cudaMemcpy((*queryCUDA), queryTemp, dimsImage * query_nb * sizeof(float), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpyToSymbol( scaleCUDA, scale, dimsImage * sizeof(float))); // constant memory return 0; } void setDeviceCUDA_(int devCUDA) { // WE ASSUME qeuryCUDA AND indCUDA HAVE BEEN ALLOCATED ALREADY AND MEMORY // TRANSFERRED TO THE GPU HANDLE_ERROR(cudaSetDevice(devCUDA)); } //==================================================================================================== void deallocateGPUMemoryForKnnCUDA_(float **queryCUDA, int **indCUDA) { HANDLE_ERROR(cudaFree(*indCUDA)); (*indCUDA) = NULL; HANDLE_ERROR(cudaFree(*queryCUDA)); (*queryCUDA) = NULL; } //============================================================== void uploadScaleCUDA_(float *scale) { HANDLE_ERROR(cudaMemcpyToSymbol( scaleCUDA, scale, dimsImage * sizeof(float))); // constant memory }
0a3d98630c39910c324c80d749de683ef9c0f5ef.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "stencilConst1.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
  hipSetDevice(0);
  char* p;int matrix_len=strtol(argv[1], &p, 10);
  for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
    for(int block_looper=0;block_looper<20;block_looper++){
      int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
      float *src = NULL;
      hipMalloc(&src, XSIZE*YSIZE);
      float *dst = NULL;
      hipMalloc(&dst, XSIZE*YSIZE);
      int size = XSIZE*YSIZE;
      int iXSIZE= XSIZE;
      int iYSIZE= YSIZE;
      while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
      while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
      dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      hipFree(0);hipLaunchKernelGGL(( stencilConst1), dim3(gridBlock),dim3(threadBlock), 0, 0, src,dst,size);
      hipDeviceSynchronize();
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( stencilConst1), dim3(gridBlock),dim3(threadBlock), 0, 0, src,dst,size);
      }
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( stencilConst1), dim3(gridBlock),dim3(threadBlock), 0, 0, src,dst,size);
      }
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
    }
  }}
0a3d98630c39910c324c80d749de683ef9c0f5ef.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "stencilConst1.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
  cudaSetDevice(0);
  char* p;int matrix_len=strtol(argv[1], &p, 10);
  for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
    for(int block_looper=0;block_looper<20;block_looper++){
      int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
      float *src = NULL;
      cudaMalloc(&src, XSIZE*YSIZE);
      float *dst = NULL;
      cudaMalloc(&dst, XSIZE*YSIZE);
      int size = XSIZE*YSIZE;
      int iXSIZE= XSIZE;
      int iYSIZE= YSIZE;
      while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
      while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
      dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      cudaFree(0);
      stencilConst1<<<gridBlock,threadBlock>>>(src,dst,size);
      cudaDeviceSynchronize();
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        stencilConst1<<<gridBlock,threadBlock>>>(src,dst,size);
      }
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        stencilConst1<<<gridBlock,threadBlock>>>(src,dst,size);
      }
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
    }
  }}
f4a37197e69037eabffa25cbee0107ff4e474e76.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "svd3_cuda.h"
#include <stdio.h>

#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
# error printf is only supported on devices of compute capability 2.0 and higher, please compile with -arch=sm_20 or higher
#endif

extern "C" void runCudaPart();

__host__ __device__ __forceinline__
void printMat3(float a11, float a12, float a13,
               float a21, float a22, float a23,
               float a31, float a32, float a33)
{
  printf("%f %f %f \n", a11, a12, a13);
  printf("%f %f %f \n", a21, a22, a23);
  printf("%f %f %f \n", a31, a32, a33);
}

__global__ void svd3_test()
{
  int tid = blockIdx.x;

  float a11, a12, a13, a21, a22, a23, a31, a32, a33;

  a11= -0.558253; a12 = -0.0461681; a13 = -0.505735;
  a21 = -0.411397; a22 = 0.0365854; a23 = 0.199707;
  a31 = 0.285389; a32 =-0.313789; a33 = 0.200189;

  float u11, u12, u13, u21, u22, u23, u31, u32, u33;
  float s11, s12, s13, s21, s22, s23, s31, s32, s33;
  float v11, v12, v13, v21, v22, v23, v31, v32, v33;

  svd(a11, a12, a13, a21, a22, a23, a31, a32, a33,
      u11, u12, u13, u21, u22, u23, u31, u32, u33,
      s11, s12, s13, s21, s22, s23, s31, s32, s33,
      v11, v12, v13, v21, v22, v23, v31, v32, v33);
}

void runCudaPart()
{
  // all your cuda code here
  hipLaunchKernelGGL(( svd3_test), dim3(1),dim3(1), 0, 0, ); // 5 blocks, 1 GPU thread each
  hipDeviceSynchronize();
}
f4a37197e69037eabffa25cbee0107ff4e474e76.cu
#include <cuda.h>
#include "svd3_cuda.h"
#include <stdio.h>

#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
# error printf is only supported on devices of compute capability 2.0 and higher, please compile with -arch=sm_20 or higher
#endif

extern "C" void runCudaPart();

__host__ __device__ __forceinline__
void printMat3(float a11, float a12, float a13,
               float a21, float a22, float a23,
               float a31, float a32, float a33)
{
  printf("%f %f %f \n", a11, a12, a13);
  printf("%f %f %f \n", a21, a22, a23);
  printf("%f %f %f \n", a31, a32, a33);
}

__global__ void svd3_test()
{
  int tid = blockIdx.x;

  float a11, a12, a13, a21, a22, a23, a31, a32, a33;

  a11= -0.558253; a12 = -0.0461681; a13 = -0.505735;
  a21 = -0.411397; a22 = 0.0365854; a23 = 0.199707;
  a31 = 0.285389; a32 =-0.313789; a33 = 0.200189;

  float u11, u12, u13, u21, u22, u23, u31, u32, u33;
  float s11, s12, s13, s21, s22, s23, s31, s32, s33;
  float v11, v12, v13, v21, v22, v23, v31, v32, v33;

  svd(a11, a12, a13, a21, a22, a23, a31, a32, a33,
      u11, u12, u13, u21, u22, u23, u31, u32, u33,
      s11, s12, s13, s21, s22, s23, s31, s32, s33,
      v11, v12, v13, v21, v22, v23, v31, v32, v33);
}

void runCudaPart()
{
  // all your cuda code here
  svd3_test<<<1,1>>>(); // 5 blocks, 1 GPU thread each
  cudaDeviceSynchronize();
}
7db58ef1378cde455255db11eb82cf902929aff3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "thrust\device_vector.h" #include "thrust\host_vector.h" #include "thrust\random\normal_distribution.h" #include "thrust\random\linear_congruential_engine.h" #include "sm_35_atomic_functions.h" #include <stdio.h> #include <iostream> #include <vector> #include "idx.h" #include "neuron.h" #include "dot_adder.h" __global__ void Input_Layer_Thread(neuron* n, idx_content_img* im, int index) { //printf("Ax"); int id = blockIdx.x + threadIdx.x; double d = 35; n[id].output = tanh((double)(((double)im[index].values[id] - d)) / (d*3.7)); //printf("Bx "); } __global__ void Input_Layer_Thread_vec(neuron* n, int* in_vec, int in_sz, int index) { //printf("Ax"); int id = blockIdx.x + threadIdx.x; n[id].output = ((in_vec[(id*(int)powf(2, in_sz)) + index])); //printf("<%f,%d>", n[id].output, (id*(int)powf(2, in_sz)) + index); } __global__ void Output_Layer_SumGenerator_Thread(neuron* n) { int id = blockIdx.x + threadIdx.x; n[id].sum = -n[id].bias; for (int j = 0; j < n[id].in_no; j++) { n[id].sum += n[id].input_n[j]->output * n[id].input_weight[j]; } } __global__ void Output_Layer_SoftmaxSum_Thread(neuron* n, int out_sz, int o_l_sz_r2, double* tmp) { __shared__ double d_sum[2048]; //printf("OOO"); if (threadIdx.x < out_sz) d_sum[threadIdx.x] = powf(12, n[threadIdx.x].sum); else d_sum[threadIdx.x] = 0; //printf("__"); //__syncthreads(); //printf("ttA"); if (threadIdx.x == 0) { /*int tm = 16; for (int i = 0; i < 5; i++) { tm /= 2; dot_adder << <1, tm >> > (4, tm); // lg16 = 4 //__syncthreads(); }*/ for (int i = 1; i < o_l_sz_r2; i++) { d_sum[0] += d_sum[i]; } *tmp = d_sum[0]; } //printf("BCCD "); } __global__ void Output_Layer_Thread(neuron* n, uint8_t* values, int index, double* tmp) { int digit = values[index]; double d = *tmp; int id = blockIdx.x + threadIdx.x; n[id].output = powf(12, n[id].sum) / d; if (id == digit) { n[id].error = n[id].output - 1; } else { n[id].error = n[id].output; } } __global__ void Output_Layer_Thread_vec(neuron* n, int* out_vec, int out_sz, int index, double* tmp) { double d = *tmp; int id = blockIdx.x + threadIdx.x; n[id].output = tanhf(n[id].sum);//(tanhf(n[id].sum) + 1) / 2;//powf(12, n[id].sum) / d; if (out_vec[(id*(int)powf(2, out_sz)) + index] == 1) { n[id].error = n[id].output - 1;// -(double)out_vec[(id*(int)powf(2, out_sz)) + index]; } else { n[id].error = n[id].output; } n[id].error *= (1 - n[id].output*n[id].output);//n[id].output*(1 - n[id].output); printf(" {%f:%d:%f}", n[id].output, out_vec[(id*(int)powf(2, out_sz)) + index], n[id].error); } __global__ void Output_Layer_Test_Thread(neuron* n, uint8_t* values, int index, int* output) { int digit = values[index]; int _i = 0; double opp = 0; /*Softmax Output*/ double d = 0; for (int i = 0; i < 10; i++) { n[i].sum = -n[i].bias; for (int j = 0; j < n[i].in_no; j++) { n[i].sum += n[i].input_n[j]->output * n[i].input_weight[j]; } d += powf(12, n[i].sum); } for (int i = 0; i < 10; i++) { n[i].output = powf(12, n[i].sum) / d; if (opp < n[i].output) { opp = n[i].output; _i = i; } } if (_i == digit) { printf("%d=>", _i); *output = *output + 1; } else printf("\n"); } __global__ void Output_Layer_Test_Thread_vec(neuron* n, int* out_vec, int out_sz, int index, int* output) { int _i = 0; double opp = 0; /*Softmax Output*/ double d = 0; for (int i = 0; i < out_sz; i++) { n[i].sum = -n[i].bias; for (int j = 0; j < n[i].in_no; j++) { n[i].sum += n[i].input_n[j]->output * n[i].input_weight[j]; } d += 
powf(12, n[i].sum); } for (int i = 0; i < out_sz; i++) { n[i].output = tanhf(n[i].sum);//(tanhf(n[i].sum) + 1) / 2;//powf(12, n[i].sum) / d; if (out_vec[(i*(int)powf(2, out_sz)) + index] - n[i].output == 0) { ++_i; } printf("[%f]<%d> ", n[i].output, out_vec[(i*(int)powf(2, out_sz)) + index]); } if (_i == out_sz) { printf("%d=>", _i); *output = *output + 1; } else printf("\n{%d}", _i); } __global__ void dot_product_FeedForward(neuron* n, int r_b) { int id = threadIdx.x; neuron* self = &n[blockIdx.x]; //int base = (int)ceilf(log2f(self->in_no)); //printf("->%d", base); __shared__ double d[2048]; if (id < self->in_no) d[id] = self->input_weight[id] * self->input_n[id]->output; else d[id] = 0; //__syncthreads(); if (id == 0) { for (int i = 1; i < r_b; i++) { d[0] += d[i]; } self->sum = d[0]; //if (self->in_no > 0) { self->output = tanh(self->sum); } } //__syncthreads(); } __global__ void dot_product_BackwardPropogation(neuron* n) { int id = threadIdx.x; neuron* self = &n[blockIdx.x]; //int base = (int)ceilf(log2f(self->in_no)); //printf("->%d", base); __shared__ double d[64]; if (id < self->out_no) d[id] = self->output_n[id]->error * self->output_weight[id]; else d[id] = 0; __syncthreads(); if (id == 0) { for (int i = 1; i < 64; i++) { d[0] += d[i]; } self->error = d[0]; self->error *= (1 - self->output*self->output); } //__syncthreads(); } __global__ void ForwardPropogation(neuron* n) { int id = blockIdx.x + threadIdx.x; neuron* self = &n[id]; //Calculate Sum, Output=> self->sum = 0; for (int i = 0; i < self->in_no; i++) { self->sum += self->input_weight[i] * self->input_n[i]->output; }//*/ //printf("[%f] ", self->sum); if (self->in_no > 0) { self->output = tanh(self->sum); } } __global__ void BackPropogation(neuron* n) { //printf("Bps"); int id = blockIdx.x + threadIdx.x; neuron* self = &n[id]; self->error = 0; for (int i = 0; i < self->out_no; i++) { self->error += self->output_n[i]->error * self->output_weight[i]; } self->error *= (1 - self->output*self->output); // Function Derivative, Derivative of tanh //printf("[%f] ", self->error); //printf("Bpe "); } __global__ void DeltaWeightPropogation(neuron* n) { //printf("Dps"); int id = blockIdx.x + threadIdx.x; neuron* self = &n[id]; self->bias += self->learning_rate* ((self->error)); for (int i = 0; i < self->in_no; i++) { self->input_weight[i] -= self->learning_rate * ((self->input_n[i]->output * self->error) - (self->input_weight[i] * self->regularization)); } //printf("Dpe "); } __global__ void linker_thread(neuron* n, int n_no, neuron* in, int in_no) { for (int i = 0; i < in_no; i++) { in[i].output_weight = new double[n_no]; in[i].output_n = new neuron*[n_no]; } minstd_rand rng; normal_distribution<double> dist(0, 1 / powf(in_no, 0.5)); for (int i = 0; i < n_no; i++) { n[i].input_weight = new double[in_no]; n[i].input_n = new neuron*[in_no]; for (int j = 0; j < in_no; j++) { n[i].input_weight[n[i].in_no] = dist(rng); n[i].input_n[n[i].in_no] = &in[j]; in[j].output_weight[in[j].out_no] = dist(rng); in[j].output_n[in[j].out_no] = &n[i]; ++n[i].in_no; ++in[j].out_no; } printf(">-%d-<", n[i].in_no); } printf("\nLayer %d Linked to %d and initialized", n_no, in_no); } void trainer_thread(neuron* in_l, neuron** h_l, int hidden_layers, int* hidden_n, neuron* o_l, idx_content_img* img_train, uint8_t* lbl_train, double* softmax_sum, int _i, int _n) { for (int i = _i; i < _n; i++) { for (int j = 0; j < 1; j++) { //Input_Layer_thread << <1, 1 >> > (in_l, img_train, i); Input_Layer_Thread << <28 * 28, 1 >> > (in_l, img_train, i); for (int m = 0; m < 
hidden_layers; m++) ForwardPropogation << <hidden_n[m], 1 >> > (h_l[m]); Output_Layer_SumGenerator_Thread << <10, 1 >> > (o_l); //Output_Layer_SoftmaxSummation_thread << <1, 1 >> > (o_l, softmax_sum); Output_Layer_Thread << <10, 1 >> > (o_l, lbl_train, i, softmax_sum); for (int m = 0; m < hidden_layers; m++) BackPropogation << <hidden_n[m], 1 >> > (h_l[m]); for (int m = 0; m < hidden_layers; m++) DeltaWeightPropogation << <hidden_n[m], 1 >> > (h_l[m]); DeltaWeightPropogation << <10, 1 >> > (o_l); //printf("\nas %d", i); } //printf("\n->"); } //printf("\nasdssdd"); }
7db58ef1378cde455255db11eb82cf902929aff3.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "thrust\device_vector.h" #include "thrust\host_vector.h" #include "thrust\random\normal_distribution.h" #include "thrust\random\linear_congruential_engine.h" #include "sm_35_atomic_functions.h" #include <stdio.h> #include <iostream> #include <vector> #include "idx.h" #include "neuron.h" #include "dot_adder.h" __global__ void Input_Layer_Thread(neuron* n, idx_content_img* im, int index) { //printf("Ax"); int id = blockIdx.x + threadIdx.x; double d = 35; n[id].output = tanh((double)(((double)im[index].values[id] - d)) / (d*3.7)); //printf("Bx "); } __global__ void Input_Layer_Thread_vec(neuron* n, int* in_vec, int in_sz, int index) { //printf("Ax"); int id = blockIdx.x + threadIdx.x; n[id].output = ((in_vec[(id*(int)powf(2, in_sz)) + index])); //printf("<%f,%d>", n[id].output, (id*(int)powf(2, in_sz)) + index); } __global__ void Output_Layer_SumGenerator_Thread(neuron* n) { int id = blockIdx.x + threadIdx.x; n[id].sum = -n[id].bias; for (int j = 0; j < n[id].in_no; j++) { n[id].sum += n[id].input_n[j]->output * n[id].input_weight[j]; } } __global__ void Output_Layer_SoftmaxSum_Thread(neuron* n, int out_sz, int o_l_sz_r2, double* tmp) { __shared__ double d_sum[2048]; //printf("OOO"); if (threadIdx.x < out_sz) d_sum[threadIdx.x] = powf(12, n[threadIdx.x].sum); else d_sum[threadIdx.x] = 0; //printf("__"); //__syncthreads(); //printf("ttA"); if (threadIdx.x == 0) { /*int tm = 16; for (int i = 0; i < 5; i++) { tm /= 2; dot_adder << <1, tm >> > (4, tm); // lg16 = 4 //__syncthreads(); }*/ for (int i = 1; i < o_l_sz_r2; i++) { d_sum[0] += d_sum[i]; } *tmp = d_sum[0]; } //printf("BCCD "); } __global__ void Output_Layer_Thread(neuron* n, uint8_t* values, int index, double* tmp) { int digit = values[index]; double d = *tmp; int id = blockIdx.x + threadIdx.x; n[id].output = powf(12, n[id].sum) / d; if (id == digit) { n[id].error = n[id].output - 1; } else { n[id].error = n[id].output; } } __global__ void Output_Layer_Thread_vec(neuron* n, int* out_vec, int out_sz, int index, double* tmp) { double d = *tmp; int id = blockIdx.x + threadIdx.x; n[id].output = tanhf(n[id].sum);//(tanhf(n[id].sum) + 1) / 2;//powf(12, n[id].sum) / d; if (out_vec[(id*(int)powf(2, out_sz)) + index] == 1) { n[id].error = n[id].output - 1;// -(double)out_vec[(id*(int)powf(2, out_sz)) + index]; } else { n[id].error = n[id].output; } n[id].error *= (1 - n[id].output*n[id].output);//n[id].output*(1 - n[id].output); printf(" {%f:%d:%f}", n[id].output, out_vec[(id*(int)powf(2, out_sz)) + index], n[id].error); } __global__ void Output_Layer_Test_Thread(neuron* n, uint8_t* values, int index, int* output) { int digit = values[index]; int _i = 0; double opp = 0; /*Softmax Output*/ double d = 0; for (int i = 0; i < 10; i++) { n[i].sum = -n[i].bias; for (int j = 0; j < n[i].in_no; j++) { n[i].sum += n[i].input_n[j]->output * n[i].input_weight[j]; } d += powf(12, n[i].sum); } for (int i = 0; i < 10; i++) { n[i].output = powf(12, n[i].sum) / d; if (opp < n[i].output) { opp = n[i].output; _i = i; } } if (_i == digit) { printf("%d=>", _i); *output = *output + 1; } else printf("\n"); } __global__ void Output_Layer_Test_Thread_vec(neuron* n, int* out_vec, int out_sz, int index, int* output) { int _i = 0; double opp = 0; /*Softmax Output*/ double d = 0; for (int i = 0; i < out_sz; i++) { n[i].sum = -n[i].bias; for (int j = 0; j < n[i].in_no; j++) { n[i].sum += n[i].input_n[j]->output * n[i].input_weight[j]; } d += powf(12, n[i].sum); } for (int i = 0; i < out_sz; i++) { 
n[i].output = tanhf(n[i].sum);//(tanhf(n[i].sum) + 1) / 2;//powf(12, n[i].sum) / d; if (out_vec[(i*(int)powf(2, out_sz)) + index] - n[i].output == 0) { ++_i; } printf("[%f]<%d> ", n[i].output, out_vec[(i*(int)powf(2, out_sz)) + index]); } if (_i == out_sz) { printf("%d=>", _i); *output = *output + 1; } else printf("\n{%d}", _i); } __global__ void dot_product_FeedForward(neuron* n, int r_b) { int id = threadIdx.x; neuron* self = &n[blockIdx.x]; //int base = (int)ceilf(log2f(self->in_no)); //printf("->%d", base); __shared__ double d[2048]; if (id < self->in_no) d[id] = self->input_weight[id] * self->input_n[id]->output; else d[id] = 0; //__syncthreads(); if (id == 0) { for (int i = 1; i < r_b; i++) { d[0] += d[i]; } self->sum = d[0]; //if (self->in_no > 0) { self->output = tanh(self->sum); } } //__syncthreads(); } __global__ void dot_product_BackwardPropogation(neuron* n) { int id = threadIdx.x; neuron* self = &n[blockIdx.x]; //int base = (int)ceilf(log2f(self->in_no)); //printf("->%d", base); __shared__ double d[64]; if (id < self->out_no) d[id] = self->output_n[id]->error * self->output_weight[id]; else d[id] = 0; __syncthreads(); if (id == 0) { for (int i = 1; i < 64; i++) { d[0] += d[i]; } self->error = d[0]; self->error *= (1 - self->output*self->output); } //__syncthreads(); } __global__ void ForwardPropogation(neuron* n) { int id = blockIdx.x + threadIdx.x; neuron* self = &n[id]; //Calculate Sum, Output=> self->sum = 0; for (int i = 0; i < self->in_no; i++) { self->sum += self->input_weight[i] * self->input_n[i]->output; }//*/ //printf("[%f] ", self->sum); if (self->in_no > 0) { self->output = tanh(self->sum); } } __global__ void BackPropogation(neuron* n) { //printf("Bps"); int id = blockIdx.x + threadIdx.x; neuron* self = &n[id]; self->error = 0; for (int i = 0; i < self->out_no; i++) { self->error += self->output_n[i]->error * self->output_weight[i]; } self->error *= (1 - self->output*self->output); // Function Derivative, Derivative of tanh //printf("[%f] ", self->error); //printf("Bpe "); } __global__ void DeltaWeightPropogation(neuron* n) { //printf("Dps"); int id = blockIdx.x + threadIdx.x; neuron* self = &n[id]; self->bias += self->learning_rate* ((self->error)); for (int i = 0; i < self->in_no; i++) { self->input_weight[i] -= self->learning_rate * ((self->input_n[i]->output * self->error) - (self->input_weight[i] * self->regularization)); } //printf("Dpe "); } __global__ void linker_thread(neuron* n, int n_no, neuron* in, int in_no) { for (int i = 0; i < in_no; i++) { in[i].output_weight = new double[n_no]; in[i].output_n = new neuron*[n_no]; } minstd_rand rng; normal_distribution<double> dist(0, 1 / powf(in_no, 0.5)); for (int i = 0; i < n_no; i++) { n[i].input_weight = new double[in_no]; n[i].input_n = new neuron*[in_no]; for (int j = 0; j < in_no; j++) { n[i].input_weight[n[i].in_no] = dist(rng); n[i].input_n[n[i].in_no] = &in[j]; in[j].output_weight[in[j].out_no] = dist(rng); in[j].output_n[in[j].out_no] = &n[i]; ++n[i].in_no; ++in[j].out_no; } printf(">-%d-<", n[i].in_no); } printf("\nLayer %d Linked to %d and initialized", n_no, in_no); } void trainer_thread(neuron* in_l, neuron** h_l, int hidden_layers, int* hidden_n, neuron* o_l, idx_content_img* img_train, uint8_t* lbl_train, double* softmax_sum, int _i, int _n) { for (int i = _i; i < _n; i++) { for (int j = 0; j < 1; j++) { //Input_Layer_thread << <1, 1 >> > (in_l, img_train, i); Input_Layer_Thread << <28 * 28, 1 >> > (in_l, img_train, i); for (int m = 0; m < hidden_layers; m++) ForwardPropogation << <hidden_n[m], 1 
>> > (h_l[m]); Output_Layer_SumGenerator_Thread << <10, 1 >> > (o_l); //Output_Layer_SoftmaxSummation_thread << <1, 1 >> > (o_l, softmax_sum); Output_Layer_Thread << <10, 1 >> > (o_l, lbl_train, i, softmax_sum); for (int m = 0; m < hidden_layers; m++) BackPropogation << <hidden_n[m], 1 >> > (h_l[m]); for (int m = 0; m < hidden_layers; m++) DeltaWeightPropogation << <hidden_n[m], 1 >> > (h_l[m]); DeltaWeightPropogation << <10, 1 >> > (o_l); //printf("\nas %d", i); } //printf("\n->"); } //printf("\nasdssdd"); }
eb7941637c6b65546248c00f63e976251481673c.hip
// !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=64 --gridDim=1 --equality-abstraction --no-inline
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <assert.h>

#define N 2

__global__ void foo(int* p) {
  __shared__ int A[10];
  A[0] = 1;
  p[0] = A[0];
}

int main(){
  int *b;
  int *dev_b;

  b = (int*)malloc(N*sizeof(int));

  for (int i = 0; i < N; ++i){
    b[i] = i+1;
    printf(" %d; ", b[i]);
  }
  printf("\n");

  hipMalloc((void**)&dev_b, N*sizeof(int));
  hipMemcpy(dev_b, b, N*sizeof(int), hipMemcpyHostToDevice);

  hipLaunchKernelGGL(( foo), dim3(1),dim3(N), 0, 0, dev_b);
  //ESBMC_verify_kernel(foo,1,N,dev_b);

  hipMemcpy(b, dev_b, N*sizeof(int), hipMemcpyDeviceToHost);

  for (int i = 0; i < N; ++i){
    printf(" %d; ", b[i]);
    assert(b[0]==1);
  }

  free(b);
  hipFree(dev_b);
}
eb7941637c6b65546248c00f63e976251481673c.cu
//pass
//--blockDim=64 --gridDim=1 --equality-abstraction --no-inline
#include "cuda.h"
#include <stdio.h>
#include <assert.h>

#define N 2

__global__ void foo(int* p) {
  __shared__ int A[10];
  A[0] = 1;
  p[0] = A[0];
}

int main(){
  int *b;
  int *dev_b;

  b = (int*)malloc(N*sizeof(int));

  for (int i = 0; i < N; ++i){
    b[i] = i+1;
    printf(" %d; ", b[i]);
  }
  printf("\n");

  cudaMalloc((void**)&dev_b, N*sizeof(int));
  cudaMemcpy(dev_b, b, N*sizeof(int), cudaMemcpyHostToDevice);

  foo<<<1,N>>>(dev_b);
  //ESBMC_verify_kernel(foo,1,N,dev_b);

  cudaMemcpy(b, dev_b, N*sizeof(int), cudaMemcpyDeviceToHost);

  for (int i = 0; i < N; ++i){
    printf(" %d; ", b[i]);
    assert(b[0]==1);
  }

  free(b);
  cudaFree(dev_b);
}
017e530ec2224bf8c4d3837ce1f8888f160310f1.hip
// !!! This is a file automatically generated by hipify!!! /* Enunciado: * Multiplicacion de Matrices MxN (16x16) por Bloques en CUDA */ #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> hipError_t prodMatricesCuda(int *c, const int *a, const int *b, unsigned int Width); const int TILE_WIDTH = 4;//Se ha establecido un tamao de tesela de 4 hilos __global__ void productoKernel(int *c, const int *a, const int *b, unsigned int Width) { int id_fil = blockIdx.y * TILE_WIDTH + threadIdx.y; int id_col = blockIdx.x * TILE_WIDTH + threadIdx.x; int n = 0; if ((id_fil < Width) && (id_col < Width)) {//Si el hilo est fuera de los valores, no debe actuar for (int i = 0; i < Width; i++) { n = n + (a[id_fil*Width + i] * b[i*Width + id_col]); } c[id_fil*Width + id_col] = n; } } //Hace uso de ceil para obtener el tamao del Grid de Bloques int grid_calc(int Width, int Tile_Width) { double x = Width; double y = Tile_Width; return (int)(ceil(x / y));//redondea hacia arriba la divisin } void imprimeMatriz(int *v, int m, int n) {//( m * n ) int i, j, x; int ws;//numero de espacios de caracteres por casilla printf("\n"); for (i = 0; i < m; i++) { for (j = 0; j < n; j++) { ws = 5; x = v[i*m + j]; if (x < 0) {//si es negativo, se ocupa un hueco por el signo "-" ws--; x = -1 * x; } else {//para alinear los dgitos ws--; printf(" "); } do {//Se ocupa un hueco por digito del numero ws--; x = x / 10; } while (x > 0); printf("%d", v[i*m + j]);//imprimimos el numero while (ws > 0) {//y ocupamos el resto de huecos con espacios en blanco printf(" "); ws--; } } printf("\n"); } } void imprimeMatriz(int *v, int m) {//Para matrices cuadradas ( m * m ) int i, j, x; int ws;//numero de espacios de caracteres por casilla printf("\n"); for (i = 0; i < m; i++) { for (j = 0; j < m; j++) { ws = 5; x = v[i*m + j]; if (x < 0) {//si es negativo, se ocupa un hueco por el signo "-" ws--; x = -1 * x; } else {//para alinear los dgitos ws--; printf(" "); } do {//Se ocupa un hueco por digito del numero ws--; x = x / 10; } while (x > 0); printf("%d", v[i*m + j]);//imprimimos el numero while (ws > 0) {//y ocupamos el resto de huecos con espacios en blanco printf(" "); ws--; } } printf("\n"); } } void generaMatriz(int *v, int m, int n, int max, int min) {//( m * n ) int i, j; for (i = 0; i < m; i++) { for (j = 0; j < n; j++) { v[i*n + j] = (rand() % (max - min)) + min; } } } void generaMatriz(int *v, int m, int max, int min) {//Para matrices cuadradas ( m * m ) int i, j; for (i = 0; i < m; i++) { for (j = 0; j < m; j++) { v[i*m + j] = (rand() % (max - min)) + min; } } } int main() { /*const int Width = 6;//Pruebas int a[6 * 6] = { 8, 7, 3, -4, -2, -3, 8, 9, -6, 3, -1, -4, -6, -1, -10, -7, 8, 6, 4, -6, -6, -3, 8, 7, -1, -1, -7, -8, -1, 9, 7, 8, 3, 7, 2, 3 }; int b[6 * 6] = { 0, 9, 9, 5, -10, 6, -2, 3, 8, 4, 0, -9, 7, 1, -8, -9, -10, -9, -3, -5, 7, -2, 6, 4, 7, 9, -3, -9, -9, 6, 6, 6, 4, -8, 8, -5 }; //Resultado de a * b = // -13 80 70 91 -140 -55 // -100 45 200 165 -25 47 // 45 76 -31 -50 94 53 // 77 141 19 -72 -14 133 // 24 66 22 7 113 -17 // 16 91 158 -16 -52 -32*/ srand((unsigned int)time(0)); const int max = 10; const int min = -10; const int Width = 16; int a[Width * Width] = { 0 }; generaMatriz(a, Width, max, min); int b[Width * Width] = { 0 }; generaMatriz(b, Width, max, min); int c[Width * Width] = { 0 }; // Add vectors in parallel. 
hipError_t cudaStatus = prodMatricesCuda(c, a, b, Width); if (cudaStatus != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("\n\tMatriz A\n"); imprimeMatriz(a, Width); printf("\n\tMatriz B\n"); imprimeMatriz(b, Width); printf("\n\tResultado del producto:\n"); imprimeMatriz(c, Width); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. hipError_t prodMatricesCuda(int *c, const int *a, const int *b, unsigned int Width) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; dim3 DimGrid(grid_calc(Width, TILE_WIDTH), grid_calc(Width, TILE_WIDTH)); dim3 DimBlock(TILE_WIDTH, TILE_WIDTH); hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_c, Width * Width * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_a, Width * Width * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_b, Width * Width * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_a, a, Width * Width * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(dev_b, b, Width * Width * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( productoKernel), dim3(DimGrid), dim3(DimBlock), 0, 0, dev_c, dev_a, dev_b, Width); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(c, dev_c, Width * Width * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); return cudaStatus; }
017e530ec2224bf8c4d3837ce1f8888f160310f1.cu
/* Enunciado: * Multiplicacion de Matrices MxN (16x16) por Bloques en CUDA */ #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> cudaError_t prodMatricesCuda(int *c, const int *a, const int *b, unsigned int Width); const int TILE_WIDTH = 4;//Se ha establecido un tamaño de tesela de 4 hilos __global__ void productoKernel(int *c, const int *a, const int *b, unsigned int Width) { int id_fil = blockIdx.y * TILE_WIDTH + threadIdx.y; int id_col = blockIdx.x * TILE_WIDTH + threadIdx.x; int n = 0; if ((id_fil < Width) && (id_col < Width)) {//Si el hilo está fuera de los valores, no debe actuar for (int i = 0; i < Width; i++) { n = n + (a[id_fil*Width + i] * b[i*Width + id_col]); } c[id_fil*Width + id_col] = n; } } //Hace uso de ceil para obtener el tamaño del Grid de Bloques int grid_calc(int Width, int Tile_Width) { double x = Width; double y = Tile_Width; return (int)(ceil(x / y));//redondea hacia arriba la división } void imprimeMatriz(int *v, int m, int n) {//( m * n ) int i, j, x; int ws;//numero de espacios de caracteres por casilla printf("\n"); for (i = 0; i < m; i++) { for (j = 0; j < n; j++) { ws = 5; x = v[i*m + j]; if (x < 0) {//si es negativo, se ocupa un hueco por el signo "-" ws--; x = -1 * x; } else {//para alinear los dígitos ws--; printf(" "); } do {//Se ocupa un hueco por digito del numero ws--; x = x / 10; } while (x > 0); printf("%d", v[i*m + j]);//imprimimos el numero while (ws > 0) {//y ocupamos el resto de huecos con espacios en blanco printf(" "); ws--; } } printf("\n"); } } void imprimeMatriz(int *v, int m) {//Para matrices cuadradas ( m * m ) int i, j, x; int ws;//numero de espacios de caracteres por casilla printf("\n"); for (i = 0; i < m; i++) { for (j = 0; j < m; j++) { ws = 5; x = v[i*m + j]; if (x < 0) {//si es negativo, se ocupa un hueco por el signo "-" ws--; x = -1 * x; } else {//para alinear los dígitos ws--; printf(" "); } do {//Se ocupa un hueco por digito del numero ws--; x = x / 10; } while (x > 0); printf("%d", v[i*m + j]);//imprimimos el numero while (ws > 0) {//y ocupamos el resto de huecos con espacios en blanco printf(" "); ws--; } } printf("\n"); } } void generaMatriz(int *v, int m, int n, int max, int min) {//( m * n ) int i, j; for (i = 0; i < m; i++) { for (j = 0; j < n; j++) { v[i*n + j] = (rand() % (max - min)) + min; } } } void generaMatriz(int *v, int m, int max, int min) {//Para matrices cuadradas ( m * m ) int i, j; for (i = 0; i < m; i++) { for (j = 0; j < m; j++) { v[i*m + j] = (rand() % (max - min)) + min; } } } int main() { /*const int Width = 6;//Pruebas int a[6 * 6] = { 8, 7, 3, -4, -2, -3, 8, 9, -6, 3, -1, -4, -6, -1, -10, -7, 8, 6, 4, -6, -6, -3, 8, 7, -1, -1, -7, -8, -1, 9, 7, 8, 3, 7, 2, 3 }; int b[6 * 6] = { 0, 9, 9, 5, -10, 6, -2, 3, 8, 4, 0, -9, 7, 1, -8, -9, -10, -9, -3, -5, 7, -2, 6, 4, 7, 9, -3, -9, -9, 6, 6, 6, 4, -8, 8, -5 }; //Resultado de a * b = // -13 80 70 91 -140 -55 // -100 45 200 165 -25 47 // 45 76 -31 -50 94 53 // 77 141 19 -72 -14 133 // 24 66 22 7 113 -17 // 16 91 158 -16 -52 -32*/ srand((unsigned int)time(0)); const int max = 10; const int min = -10; const int Width = 16; int a[Width * Width] = { 0 }; generaMatriz(a, Width, max, min); int b[Width * Width] = { 0 }; generaMatriz(b, Width, max, min); int c[Width * Width] = { 0 }; // Add vectors in parallel. 
    cudaError_t cudaStatus = prodMatricesCuda(c, a, b, Width);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "prodMatricesCuda failed!");
        return 1;
    }

    printf("\n\tMatriz A\n");
    imprimeMatriz(a, Width);
    printf("\n\tMatriz B\n");
    imprimeMatriz(b, Width);
    printf("\n\tResultado del producto:\n");
    imprimeMatriz(c, Width);

    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }

    return 0;
}

// Helper function for using CUDA to multiply two Width x Width matrices in parallel.
cudaError_t prodMatricesCuda(int *c, const int *a, const int *b, unsigned int Width)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    dim3 DimGrid(grid_calc(Width, TILE_WIDTH), grid_calc(Width, TILE_WIDTH));
    dim3 DimBlock(TILE_WIDTH, TILE_WIDTH);
    cudaError_t cudaStatus;

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    // Allocate GPU buffers for the three matrices (two input, one output).
    cudaStatus = cudaMalloc((void**)&dev_c, Width * Width * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    cudaStatus = cudaMalloc((void**)&dev_a, Width * Width * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    cudaStatus = cudaMalloc((void**)&dev_b, Width * Width * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    // Copy input matrices from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_a, a, Width * Width * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

    cudaStatus = cudaMemcpy(dev_b, b, Width * Width * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

    // Launch a kernel on the GPU with one thread for each element.
    productoKernel<<<DimGrid, DimBlock>>>(dev_c, dev_a, dev_b, Width);

    // Check for any errors launching the kernel
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "productoKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching productoKernel!\n", cudaStatus);
        goto Error;
    }

    // Copy output matrix from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(c, dev_c, Width * Width * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

Error:
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);

    return cudaStatus;
}
2408ea1eae84fdadcc9bbd9345c368725f50bc76.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<curd_lib_host.h> #include<curd_lib_host.h> #include<curd_lib_host.h> #include<curd_lib_host.h> //=============================================================================================================================================================================================================== //=============================================================================================================================================================================================================== // KERNEL FUNCTION //=============================================================================================================================================================================================================== //=============================================================================================================================================================================================================== __global__ void kernel(){ //====================================================================================================================================================== // COMMON VARIABLES //====================================================================================================================================================== fp* d_in; int rot_row; int rot_col; int in2_rowlow; int in2_collow; int ic; int jc; int jp1; int ja1, ja2; int ip1; int ia1, ia2; int ja, jb; int ia, ib; float s; int i; int j; int row; int col; int ori_row; int ori_col; int position; float sum; int pos_ori; float temp; float temp2; int location; int cent; int tMask_row; int tMask_col; float largest_value_current = 0; float largest_value = 0; int largest_coordinate_current = 0; int largest_coordinate = 0; float fin_max_val = 0; int fin_max_coo = 0; int largest_row; int largest_col; int offset_row; int offset_col; __shared__ float in_partial_sum[51]; // WATCH THIS !!! HARDCODED VALUE __shared__ float in_sqr_partial_sum[51]; // WATCH THIS !!! HARDCODED VALUE __shared__ float in_final_sum; __shared__ float in_sqr_final_sum; float mean; float mean_sqr; float variance; float deviation; __shared__ float denomT; __shared__ float par_max_val[131]; // WATCH THIS !!! HARDCODED VALUE __shared__ int par_max_coo[131]; // WATCH THIS !!! 
HARDCODED VALUE int pointer; __shared__ float d_in_mod_temp[2601]; int ori_pointer; int loc_pointer; //====================================================================================================================================================== // THREAD PARAMETERS //====================================================================================================================================================== int bx = blockIdx.x; // get current horizontal block index (0-n) int tx = threadIdx.x; // get current horizontal thread index (0-n) int ei_new; //=============================================================================================================================================================================================================== //=============================================================================================================================================================================================================== // GENERATE TEMPLATE //=============================================================================================================================================================================================================== //=============================================================================================================================================================================================================== // generate templates based on the first frame only if(d_common_change.frame_no == 0){ //====================================================================================================================================================== // GET POINTER TO TEMPLATE FOR THE POINT //====================================================================================================================================================== // pointers to: current template for current point d_in = &d_unique[bx].d_T[d_unique[bx].in_pointer]; //====================================================================================================================================================== // UPDATE ROW LOC AND COL LOC //====================================================================================================================================================== // uptade temporary endo/epi row/col coordinates (in each block corresponding to point, narrow work to one thread) ei_new = tx; if(ei_new == 0){ // update temporary row/col coordinates pointer = d_unique[bx].point_no*d_common.no_frames+d_common_change.frame_no; d_unique[bx].d_tRowLoc[pointer] = d_unique[bx].d_Row[d_unique[bx].point_no]; d_unique[bx].d_tColLoc[pointer] = d_unique[bx].d_Col[d_unique[bx].point_no]; } //====================================================================================================================================================== // CREATE TEMPLATES //====================================================================================================================================================== // work ei_new = tx; while(ei_new < d_common.in_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in_rows == 0){ row = d_common.in_rows - 1; col = col-1; } // figure out row/col location in corresponding new template area in image and give to every thread (get top left corner and progress down and right) ori_row = d_unique[bx].d_Row[d_unique[bx].point_no] - 25 
+ row - 1; ori_col = d_unique[bx].d_Col[d_unique[bx].point_no] - 25 + col - 1; ori_pointer = ori_col*d_common.frame_rows+ori_row; // update template d_in[col*d_common.in_rows+row] = d_common_change.d_frame[ori_pointer]; // go for second round ei_new = ei_new + NUMBER_THREADS; } } //=============================================================================================================================================================================================================== //=============================================================================================================================================================================================================== // PROCESS POINTS //=============================================================================================================================================================================================================== //=============================================================================================================================================================================================================== // process points in all frames except for the first one if(d_common_change.frame_no != 0){ //====================================================================================================================================================== // SELECTION //====================================================================================================================================================== in2_rowlow = d_unique[bx].d_Row[d_unique[bx].point_no] - d_common.sSize; // (1 to n+1) in2_collow = d_unique[bx].d_Col[d_unique[bx].point_no] - d_common.sSize; // work ei_new = tx; while(ei_new < d_common.in2_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in2_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_rows == 0){ row = d_common.in2_rows - 1; col = col-1; } // figure out corresponding location in old matrix and copy values to new matrix ori_row = row + in2_rowlow - 1; ori_col = col + in2_collow - 1; d_unique[bx].d_in2[ei_new] = d_common_change.d_frame[ori_col*d_common.frame_rows+ori_row]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //====================================================================================================================================================== // SYNCHRONIZE THREADS //====================================================================================================================================================== __syncthreads(); //====================================================================================================================================================== // CONVOLUTION //====================================================================================================================================================== //==================================================================================================== // ROTATION //==================================================================================================== // variables d_in = &d_unique[bx].d_T[d_unique[bx].in_pointer]; // work ei_new = tx; while(ei_new < d_common.in_elem){ // figure out row/col location in padded array row = (ei_new+1) % d_common.in_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in_rows == 0){ row = d_common.in_rows - 1; col = col-1; 
} // execution rot_row = (d_common.in_rows-1) - row; rot_col = (d_common.in_rows-1) - col; d_in_mod_temp[ei_new] = d_in[rot_col*d_common.in_rows+rot_row]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // ACTUAL CONVOLUTION //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.conv_elem){ // figure out row/col location in array ic = (ei_new+1) % d_common.conv_rows; // (1-n) jc = (ei_new+1) / d_common.conv_rows + 1; // (1-n) if((ei_new+1) % d_common.conv_rows == 0){ ic = d_common.conv_rows; jc = jc-1; } // j = jc + d_common.joffset; jp1 = j + 1; if(d_common.in2_cols < jp1){ ja1 = jp1 - d_common.in2_cols; } else{ ja1 = 1; } if(d_common.in_cols < j){ ja2 = d_common.in_cols; } else{ ja2 = j; } i = ic + d_common.ioffset; ip1 = i + 1; if(d_common.in2_rows < ip1){ ia1 = ip1 - d_common.in2_rows; } else{ ia1 = 1; } if(d_common.in_rows < i){ ia2 = d_common.in_rows; } else{ ia2 = i; } s = 0; for(ja=ja1; ja<=ja2; ja++){ jb = jp1 - ja; for(ia=ia1; ia<=ia2; ia++){ ib = ip1 - ia; s = s + d_in_mod_temp[d_common.in_rows*(ja-1)+ia-1] * d_unique[bx].d_in2[d_common.in2_rows*(jb-1)+ib-1]; } } //d_unique[bx].d_conv[d_common.conv_rows*(jc-1)+ic-1] = s; d_unique[bx].d_conv[ei_new] = s; // go for second round ei_new = ei_new + NUMBER_THREADS; } //====================================================================================================================================================== // SYNCHRONIZE THREADS //====================================================================================================================================================== __syncthreads(); //====================================================================================================================================================== // CUMULATIVE SUM //====================================================================================================================================================== //==================================================================================================== // PAD ARRAY, VERTICAL CUMULATIVE SUM //==================================================================================================== //================================================== // PADD ARRAY //================================================== // work ei_new = tx; while(ei_new < d_common.in2_pad_cumv_elem){ // figure out row/col location in padded array row = (ei_new+1) % d_common.in2_pad_cumv_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_pad_cumv_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_pad_cumv_rows == 0){ row = d_common.in2_pad_cumv_rows - 1; col = col-1; } // execution if( row > (d_common.in2_pad_add_rows-1) && // do if has numbers in original array row < (d_common.in2_pad_add_rows+d_common.in2_rows) && col > (d_common.in2_pad_add_cols-1) && col < (d_common.in2_pad_add_cols+d_common.in2_cols)){ ori_row = row - d_common.in2_pad_add_rows; ori_col = col - d_common.in2_pad_add_cols; d_unique[bx].d_in2_pad_cumv[ei_new] = d_unique[bx].d_in2[ori_col*d_common.in2_rows+ori_row]; } else{ // do if otherwise 
d_unique[bx].d_in2_pad_cumv[ei_new] = 0; } // go for second round ei_new = ei_new + NUMBER_THREADS; } //================================================== // SYNCHRONIZE THREADS //================================================== __syncthreads(); //================================================== // VERTICAL CUMULATIVE SUM //================================================== //work ei_new = tx; while(ei_new < d_common.in2_pad_cumv_cols){ // figure out column position pos_ori = ei_new*d_common.in2_pad_cumv_rows; // variables sum = 0; // loop through all rows for(position = pos_ori; position < pos_ori+d_common.in2_pad_cumv_rows; position = position + 1){ d_unique[bx].d_in2_pad_cumv[position] = d_unique[bx].d_in2_pad_cumv[position] + sum; sum = d_unique[bx].d_in2_pad_cumv[position]; } // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // SELECTION //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.in2_pad_cumv_sel_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in2_pad_cumv_sel_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_pad_cumv_sel_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_pad_cumv_sel_rows == 0){ row = d_common.in2_pad_cumv_sel_rows - 1; col = col-1; } // figure out corresponding location in old matrix and copy values to new matrix ori_row = row + d_common.in2_pad_cumv_sel_rowlow - 1; ori_col = col + d_common.in2_pad_cumv_sel_collow - 1; d_unique[bx].d_in2_pad_cumv_sel[ei_new] = d_unique[bx].d_in2_pad_cumv[ori_col*d_common.in2_pad_cumv_rows+ori_row]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // SELECTION 2, SUBTRACTION, HORIZONTAL CUMULATIVE SUM //==================================================================================================== //================================================== // SELECTION 2 //================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub_cumh_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in2_sub_cumh_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_sub_cumh_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_sub_cumh_rows == 0){ row = d_common.in2_sub_cumh_rows - 1; col = col-1; } // figure out corresponding location in old matrix and copy values to new matrix ori_row = row + d_common.in2_pad_cumv_sel2_rowlow - 1; ori_col = col + d_common.in2_pad_cumv_sel2_collow - 1; d_unique[bx].d_in2_sub_cumh[ei_new] = d_unique[bx].d_in2_pad_cumv[ori_col*d_common.in2_pad_cumv_rows+ori_row]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //================================================== // SYNCHRONIZE THREADS //================================================== __syncthreads(); 
//================================================== // SUBTRACTION //================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub_cumh_elem){ // subtract d_unique[bx].d_in2_sub_cumh[ei_new] = d_unique[bx].d_in2_pad_cumv_sel[ei_new] - d_unique[bx].d_in2_sub_cumh[ei_new]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //================================================== // SYNCHRONIZE THREADS //================================================== __syncthreads(); //================================================== // HORIZONTAL CUMULATIVE SUM //================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub_cumh_rows){ // figure out row position pos_ori = ei_new; // variables sum = 0; // loop through all rows for(position = pos_ori; position < pos_ori+d_common.in2_sub_cumh_elem; position = position + d_common.in2_sub_cumh_rows){ d_unique[bx].d_in2_sub_cumh[position] = d_unique[bx].d_in2_sub_cumh[position] + sum; sum = d_unique[bx].d_in2_sub_cumh[position]; } // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // SELECTION //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub_cumh_sel_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in2_sub_cumh_sel_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_sub_cumh_sel_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_sub_cumh_sel_rows == 0){ row = d_common.in2_sub_cumh_sel_rows - 1; col = col - 1; } // figure out corresponding location in old matrix and copy values to new matrix ori_row = row + d_common.in2_sub_cumh_sel_rowlow - 1; ori_col = col + d_common.in2_sub_cumh_sel_collow - 1; d_unique[bx].d_in2_sub_cumh_sel[ei_new] = d_unique[bx].d_in2_sub_cumh[ori_col*d_common.in2_sub_cumh_rows+ori_row]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // SELECTION 2, SUBTRACTION //==================================================================================================== //================================================== // SELECTION 2 //================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub2_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in2_sub2_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_sub2_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_sub2_rows == 0){ row = d_common.in2_sub2_rows - 1; col = col-1; } // figure out corresponding location in old matrix and copy values to new matrix ori_row = row + d_common.in2_sub_cumh_sel2_rowlow - 1; ori_col = col + d_common.in2_sub_cumh_sel2_collow - 1; d_unique[bx].d_in2_sub2[ei_new] = d_unique[bx].d_in2_sub_cumh[ori_col*d_common.in2_sub_cumh_rows+ori_row]; // 
go for second round ei_new = ei_new + NUMBER_THREADS; } //================================================== // SYNCHRONIZE THREADS //================================================== __syncthreads(); //================================================== // SUBTRACTION //================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub2_elem){ // subtract d_unique[bx].d_in2_sub2[ei_new] = d_unique[bx].d_in2_sub_cumh_sel[ei_new] - d_unique[bx].d_in2_sub2[ei_new]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //====================================================================================================================================================== // SYNCHRONIZE THREADS //====================================================================================================================================================== __syncthreads(); //====================================================================================================================================================== // CUMULATIVE SUM 2 //====================================================================================================================================================== //==================================================================================================== // MULTIPLICATION //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.in2_sqr_elem){ temp = d_unique[bx].d_in2[ei_new]; d_unique[bx].d_in2_sqr[ei_new] = temp * temp; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // PAD ARRAY, VERTICAL CUMULATIVE SUM //==================================================================================================== //================================================== // PAD ARRAY //================================================== // work ei_new = tx; while(ei_new < d_common.in2_pad_cumv_elem){ // figure out row/col location in padded array row = (ei_new+1) % d_common.in2_pad_cumv_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_pad_cumv_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_pad_cumv_rows == 0){ row = d_common.in2_pad_cumv_rows - 1; col = col-1; } // execution if( row > (d_common.in2_pad_add_rows-1) && // do if has numbers in original array row < (d_common.in2_pad_add_rows+d_common.in2_sqr_rows) && col > (d_common.in2_pad_add_cols-1) && col < (d_common.in2_pad_add_cols+d_common.in2_sqr_cols)){ ori_row = row - d_common.in2_pad_add_rows; ori_col = col - d_common.in2_pad_add_cols; d_unique[bx].d_in2_pad_cumv[ei_new] = d_unique[bx].d_in2_sqr[ori_col*d_common.in2_sqr_rows+ori_row]; } else{ // do if otherwise d_unique[bx].d_in2_pad_cumv[ei_new] = 0; } // go for second round ei_new = ei_new + NUMBER_THREADS; } //================================================== // SYNCHRONIZE THREADS //================================================== __syncthreads(); //================================================== // VERTICAL CUMULATIVE SUM //================================================== //work ei_new = tx; while(ei_new < d_common.in2_pad_cumv_cols){ // figure out column 
position pos_ori = ei_new*d_common.in2_pad_cumv_rows; // variables sum = 0; // loop through all rows for(position = pos_ori; position < pos_ori+d_common.in2_pad_cumv_rows; position = position + 1){ d_unique[bx].d_in2_pad_cumv[position] = d_unique[bx].d_in2_pad_cumv[position] + sum; sum = d_unique[bx].d_in2_pad_cumv[position]; } // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // SELECTION //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.in2_pad_cumv_sel_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in2_pad_cumv_sel_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_pad_cumv_sel_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_pad_cumv_sel_rows == 0){ row = d_common.in2_pad_cumv_sel_rows - 1; col = col-1; } // figure out corresponding location in old matrix and copy values to new matrix ori_row = row + d_common.in2_pad_cumv_sel_rowlow - 1; ori_col = col + d_common.in2_pad_cumv_sel_collow - 1; d_unique[bx].d_in2_pad_cumv_sel[ei_new] = d_unique[bx].d_in2_pad_cumv[ori_col*d_common.in2_pad_cumv_rows+ori_row]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // SELECTION 2, SUBTRACTION, HORIZONTAL CUMULATIVE SUM //==================================================================================================== //================================================== // SELECTION 2 //================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub_cumh_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in2_sub_cumh_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_sub_cumh_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_sub_cumh_rows == 0){ row = d_common.in2_sub_cumh_rows - 1; col = col-1; } // figure out corresponding location in old matrix and copy values to new matrix ori_row = row + d_common.in2_pad_cumv_sel2_rowlow - 1; ori_col = col + d_common.in2_pad_cumv_sel2_collow - 1; d_unique[bx].d_in2_sub_cumh[ei_new] = d_unique[bx].d_in2_pad_cumv[ori_col*d_common.in2_pad_cumv_rows+ori_row]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //================================================== // SYNCHRONIZE THREADS //================================================== __syncthreads(); //================================================== // SUBTRACTION //================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub_cumh_elem){ // subtract d_unique[bx].d_in2_sub_cumh[ei_new] = d_unique[bx].d_in2_pad_cumv_sel[ei_new] - d_unique[bx].d_in2_sub_cumh[ei_new]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //================================================== // HORIZONTAL CUMULATIVE SUM 
//================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub_cumh_rows){ // figure out row position pos_ori = ei_new; // variables sum = 0; // loop through all rows for(position = pos_ori; position < pos_ori+d_common.in2_sub_cumh_elem; position = position + d_common.in2_sub_cumh_rows){ d_unique[bx].d_in2_sub_cumh[position] = d_unique[bx].d_in2_sub_cumh[position] + sum; sum = d_unique[bx].d_in2_sub_cumh[position]; } // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // SELECTION //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub_cumh_sel_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in2_sub_cumh_sel_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_sub_cumh_sel_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_sub_cumh_sel_rows == 0){ row = d_common.in2_sub_cumh_sel_rows - 1; col = col - 1; } // figure out corresponding location in old matrix and copy values to new matrix ori_row = row + d_common.in2_sub_cumh_sel_rowlow - 1; ori_col = col + d_common.in2_sub_cumh_sel_collow - 1; d_unique[bx].d_in2_sub_cumh_sel[ei_new] = d_unique[bx].d_in2_sub_cumh[ori_col*d_common.in2_sub_cumh_rows+ori_row]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // SELECTION 2, SUBTRACTION //==================================================================================================== //================================================== // SELECTION 2 //================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub2_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in2_sub2_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_sub2_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_sub2_rows == 0){ row = d_common.in2_sub2_rows - 1; col = col-1; } // figure out corresponding location in old matrix and copy values to new matrix ori_row = row + d_common.in2_sub_cumh_sel2_rowlow - 1; ori_col = col + d_common.in2_sub_cumh_sel2_collow - 1; d_unique[bx].d_in2_sqr_sub2[ei_new] = d_unique[bx].d_in2_sub_cumh[ori_col*d_common.in2_sub_cumh_rows+ori_row]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //================================================== // SYNCHRONIZE THREADS //================================================== __syncthreads(); //================================================== // SUBTRACTION //================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub2_elem){ // subtract d_unique[bx].d_in2_sqr_sub2[ei_new] = d_unique[bx].d_in2_sub_cumh_sel[ei_new] - d_unique[bx].d_in2_sqr_sub2[ei_new]; // go for second round ei_new = ei_new + NUMBER_THREADS; } 
//====================================================================================================================================================== // SYNCHRONIZE THREADS //====================================================================================================================================================== __syncthreads(); //====================================================================================================================================================== // FINAL //====================================================================================================================================================== //==================================================================================================== // DENOMINATOR A SAVE RESULT IN CUMULATIVE SUM A2 //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub2_elem){ temp = d_unique[bx].d_in2_sub2[ei_new]; temp2 = d_unique[bx].d_in2_sqr_sub2[ei_new] - (temp * temp / d_common.in_elem); if(temp2 < 0){ temp2 = 0; } d_unique[bx].d_in2_sqr_sub2[ei_new] = sqrt(temp2); // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // MULTIPLICATION //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.in_sqr_elem){ temp = d_in[ei_new]; d_unique[bx].d_in_sqr[ei_new] = temp * temp; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // IN SUM //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.in_cols){ sum = 0; for(i = 0; i < d_common.in_rows; i++){ sum = sum + d_in[ei_new*d_common.in_rows+i]; } in_partial_sum[ei_new] = sum; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // IN_SQR SUM //==================================================================================================== ei_new = tx; while(ei_new < d_common.in_sqr_rows){ sum = 0; for(i = 0; i < d_common.in_sqr_cols; i++){ sum = sum + d_unique[bx].d_in_sqr[ei_new+d_common.in_sqr_rows*i]; } in_sqr_partial_sum[ei_new] = sum; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== 
__syncthreads(); //==================================================================================================== // FINAL SUMMATION //==================================================================================================== if(tx == 0){ in_final_sum = 0; for(i = 0; i<d_common.in_cols; i++){ in_final_sum = in_final_sum + in_partial_sum[i]; } }else if(tx == 1){ in_sqr_final_sum = 0; for(i = 0; i<d_common.in_sqr_cols; i++){ in_sqr_final_sum = in_sqr_final_sum + in_sqr_partial_sum[i]; } } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // DENOMINATOR T //==================================================================================================== if(tx == 0){ mean = in_final_sum / d_common.in_elem; // gets mean (average) value of element in ROI mean_sqr = mean * mean; variance = (in_sqr_final_sum / d_common.in_elem) - mean_sqr; // gets variance of ROI deviation = sqrt(variance); // gets standard deviation of ROI denomT = sqrt(float(d_common.in_elem-1))*deviation; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // DENOMINATOR SAVE RESULT IN CUMULATIVE SUM A2 //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub2_elem){ d_unique[bx].d_in2_sqr_sub2[ei_new] = d_unique[bx].d_in2_sqr_sub2[ei_new] * denomT; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // NUMERATOR SAVE RESULT IN CONVOLUTION //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.conv_elem){ d_unique[bx].d_conv[ei_new] = d_unique[bx].d_conv[ei_new] - d_unique[bx].d_in2_sub2[ei_new] * in_final_sum / d_common.in_elem; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // CORRELATION SAVE RESULT IN CUMULATIVE SUM A2 //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub2_elem){ d_unique[bx].d_in2_sqr_sub2[ei_new] = d_unique[bx].d_conv[ei_new] / d_unique[bx].d_in2_sqr_sub2[ei_new]; // go for second round ei_new = ei_new + NUMBER_THREADS; } 
//====================================================================================================================================================== // SYNCHRONIZE THREADS //====================================================================================================================================================== __syncthreads(); //====================================================================================================================================================== // TEMPLATE MASK CREATE //====================================================================================================================================================== cent = d_common.sSize + d_common.tSize + 1; if(d_common_change.frame_no == 0){ tMask_row = cent + d_unique[bx].d_Row[d_unique[bx].point_no] - d_unique[bx].d_Row[d_unique[bx].point_no] - 1; tMask_col = cent + d_unique[bx].d_Col[d_unique[bx].point_no] - d_unique[bx].d_Col[d_unique[bx].point_no] - 1; } else{ pointer = d_common_change.frame_no-1+d_unique[bx].point_no*d_common.no_frames; tMask_row = cent + d_unique[bx].d_tRowLoc[pointer] - d_unique[bx].d_Row[d_unique[bx].point_no] - 1; tMask_col = cent + d_unique[bx].d_tColLoc[pointer] - d_unique[bx].d_Col[d_unique[bx].point_no] - 1; } //work ei_new = tx; while(ei_new < d_common.tMask_elem){ location = tMask_col*d_common.tMask_rows + tMask_row; if(ei_new==location){ d_unique[bx].d_tMask[ei_new] = 1; } else{ d_unique[bx].d_tMask[ei_new] = 0; } //go for second round ei_new = ei_new + NUMBER_THREADS; } //====================================================================================================================================================== // SYNCHRONIZE THREADS //====================================================================================================================================================== __syncthreads(); //====================================================================================================================================================== // MASK CONVOLUTION //====================================================================================================================================================== // work ei_new = tx; while(ei_new < d_common.mask_conv_elem){ // figure out row/col location in array ic = (ei_new+1) % d_common.mask_conv_rows; // (1-n) jc = (ei_new+1) / d_common.mask_conv_rows + 1; // (1-n) if((ei_new+1) % d_common.mask_conv_rows == 0){ ic = d_common.mask_conv_rows; jc = jc-1; } // j = jc + d_common.mask_conv_joffset; jp1 = j + 1; if(d_common.mask_cols < jp1){ ja1 = jp1 - d_common.mask_cols; } else{ ja1 = 1; } if(d_common.tMask_cols < j){ ja2 = d_common.tMask_cols; } else{ ja2 = j; } i = ic + d_common.mask_conv_ioffset; ip1 = i + 1; if(d_common.mask_rows < ip1){ ia1 = ip1 - d_common.mask_rows; } else{ ia1 = 1; } if(d_common.tMask_rows < i){ ia2 = d_common.tMask_rows; } else{ ia2 = i; } s = 0; for(ja=ja1; ja<=ja2; ja++){ jb = jp1 - ja; for(ia=ia1; ia<=ia2; ia++){ ib = ip1 - ia; s = s + d_unique[bx].d_tMask[d_common.tMask_rows*(ja-1)+ia-1] * 1; } } // //d_unique[bx].d_mask_conv[d_common.mask_conv_rows*(jc-1)+ic-1] = s; d_unique[bx].d_mask_conv[ei_new] = d_unique[bx].d_in2_sqr_sub2[ei_new] * s; // go for second round ei_new = ei_new + NUMBER_THREADS; } //====================================================================================================================================================== // SYNCHRONIZE THREADS 
//====================================================================================================================================================== __syncthreads(); //====================================================================================================================================================== // MAXIMUM VALUE //====================================================================================================================================================== //==================================================================================================== // INITIAL SEARCH //==================================================================================================== ei_new = tx; while(ei_new < d_common.mask_conv_rows){ for(i=0; i<d_common.mask_conv_cols; i++){ largest_coordinate_current = ei_new*d_common.mask_conv_rows+i; largest_value_current = abs(d_unique[bx].d_mask_conv[largest_coordinate_current]); if(largest_value_current > largest_value){ largest_coordinate = largest_coordinate_current; largest_value = largest_value_current; } } par_max_coo[ei_new] = largest_coordinate; par_max_val[ei_new] = largest_value; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // FINAL SEARCH //==================================================================================================== if(tx == 0){ for(i = 0; i < d_common.mask_conv_rows; i++){ if(par_max_val[i] > fin_max_val){ fin_max_val = par_max_val[i]; fin_max_coo = par_max_coo[i]; } } // convert coordinate to row/col form largest_row = (fin_max_coo+1) % d_common.mask_conv_rows - 1; // (0-n) row largest_col = (fin_max_coo+1) / d_common.mask_conv_rows; // (0-n) column if((fin_max_coo+1) % d_common.mask_conv_rows == 0){ largest_row = d_common.mask_conv_rows - 1; largest_col = largest_col - 1; } // calculate offset largest_row = largest_row + 1; // compensate to match MATLAB format (1-n) largest_col = largest_col + 1; // compensate to match MATLAB format (1-n) offset_row = largest_row - d_common.in_rows - (d_common.sSize - d_common.tSize); offset_col = largest_col - d_common.in_cols - (d_common.sSize - d_common.tSize); pointer = d_common_change.frame_no+d_unique[bx].point_no*d_common.no_frames; d_unique[bx].d_tRowLoc[pointer] = d_unique[bx].d_Row[d_unique[bx].point_no] + offset_row; d_unique[bx].d_tColLoc[pointer] = d_unique[bx].d_Col[d_unique[bx].point_no] + offset_col; } //====================================================================================================================================================== // SYNCHRONIZE THREADS //====================================================================================================================================================== __syncthreads(); } //=============================================================================================================================================================================================================== //=============================================================================================================================================================================================================== // COORDINATE AND TEMPLATE 
UPDATE //=============================================================================================================================================================================================================== //=============================================================================================================================================================================================================== // time19 = clock(); // if the last frame in the bath, update template if(d_common_change.frame_no != 0 && (d_common_change.frame_no)%10 == 0){ // update coordinate loc_pointer = d_unique[bx].point_no*d_common.no_frames+d_common_change.frame_no; d_unique[bx].d_Row[d_unique[bx].point_no] = d_unique[bx].d_tRowLoc[loc_pointer]; d_unique[bx].d_Col[d_unique[bx].point_no] = d_unique[bx].d_tColLoc[loc_pointer]; // work ei_new = tx; while(ei_new < d_common.in_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in_rows == 0){ row = d_common.in_rows - 1; col = col-1; } // figure out row/col location in corresponding new template area in image and give to every thread (get top left corner and progress down and right) ori_row = d_unique[bx].d_Row[d_unique[bx].point_no] - 25 + row - 1; ori_col = d_unique[bx].d_Col[d_unique[bx].point_no] - 25 + col - 1; ori_pointer = ori_col*d_common.frame_rows+ori_row; // update template d_in[ei_new] = d_common.alpha*d_in[ei_new] + (1.00-d_common.alpha)*d_common_change.d_frame[ori_pointer]; // go for second round ei_new = ei_new + NUMBER_THREADS; } } } //=============================================================================================================================================================================================================== //=============================================================================================================================================================================================================== // END OF FUNCTION //=============================================================================================================================================================================================================== //===============================================================================================================================================================================================================
2408ea1eae84fdadcc9bbd9345c368725f50bc76.cu
#include<curd_lib_host.h> #include<curd_lib_host.h> #include<curd_lib_host.h> #include<curd_lib_host.h> //=============================================================================================================================================================================================================== //=============================================================================================================================================================================================================== // KERNEL FUNCTION //=============================================================================================================================================================================================================== //=============================================================================================================================================================================================================== __global__ void kernel(){ //====================================================================================================================================================== // COMMON VARIABLES //====================================================================================================================================================== fp* d_in; int rot_row; int rot_col; int in2_rowlow; int in2_collow; int ic; int jc; int jp1; int ja1, ja2; int ip1; int ia1, ia2; int ja, jb; int ia, ib; float s; int i; int j; int row; int col; int ori_row; int ori_col; int position; float sum; int pos_ori; float temp; float temp2; int location; int cent; int tMask_row; int tMask_col; float largest_value_current = 0; float largest_value = 0; int largest_coordinate_current = 0; int largest_coordinate = 0; float fin_max_val = 0; int fin_max_coo = 0; int largest_row; int largest_col; int offset_row; int offset_col; __shared__ float in_partial_sum[51]; // WATCH THIS !!! HARDCODED VALUE __shared__ float in_sqr_partial_sum[51]; // WATCH THIS !!! HARDCODED VALUE __shared__ float in_final_sum; __shared__ float in_sqr_final_sum; float mean; float mean_sqr; float variance; float deviation; __shared__ float denomT; __shared__ float par_max_val[131]; // WATCH THIS !!! HARDCODED VALUE __shared__ int par_max_coo[131]; // WATCH THIS !!! 
HARDCODED VALUE int pointer; __shared__ float d_in_mod_temp[2601]; int ori_pointer; int loc_pointer; //====================================================================================================================================================== // THREAD PARAMETERS //====================================================================================================================================================== int bx = blockIdx.x; // get current horizontal block index (0-n) int tx = threadIdx.x; // get current horizontal thread index (0-n) int ei_new; //=============================================================================================================================================================================================================== //=============================================================================================================================================================================================================== // GENERATE TEMPLATE //=============================================================================================================================================================================================================== //=============================================================================================================================================================================================================== // generate templates based on the first frame only if(d_common_change.frame_no == 0){ //====================================================================================================================================================== // GET POINTER TO TEMPLATE FOR THE POINT //====================================================================================================================================================== // pointers to: current template for current point d_in = &d_unique[bx].d_T[d_unique[bx].in_pointer]; //====================================================================================================================================================== // UPDATE ROW LOC AND COL LOC //====================================================================================================================================================== // uptade temporary endo/epi row/col coordinates (in each block corresponding to point, narrow work to one thread) ei_new = tx; if(ei_new == 0){ // update temporary row/col coordinates pointer = d_unique[bx].point_no*d_common.no_frames+d_common_change.frame_no; d_unique[bx].d_tRowLoc[pointer] = d_unique[bx].d_Row[d_unique[bx].point_no]; d_unique[bx].d_tColLoc[pointer] = d_unique[bx].d_Col[d_unique[bx].point_no]; } //====================================================================================================================================================== // CREATE TEMPLATES //====================================================================================================================================================== // work ei_new = tx; while(ei_new < d_common.in_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in_rows == 0){ row = d_common.in_rows - 1; col = col-1; } // figure out row/col location in corresponding new template area in image and give to every thread (get top left corner and progress down and right) ori_row = d_unique[bx].d_Row[d_unique[bx].point_no] - 25 
+ row - 1; ori_col = d_unique[bx].d_Col[d_unique[bx].point_no] - 25 + col - 1; ori_pointer = ori_col*d_common.frame_rows+ori_row; // update template d_in[col*d_common.in_rows+row] = d_common_change.d_frame[ori_pointer]; // go for second round ei_new = ei_new + NUMBER_THREADS; } } //=============================================================================================================================================================================================================== //=============================================================================================================================================================================================================== // PROCESS POINTS //=============================================================================================================================================================================================================== //=============================================================================================================================================================================================================== // process points in all frames except for the first one if(d_common_change.frame_no != 0){ //====================================================================================================================================================== // SELECTION //====================================================================================================================================================== in2_rowlow = d_unique[bx].d_Row[d_unique[bx].point_no] - d_common.sSize; // (1 to n+1) in2_collow = d_unique[bx].d_Col[d_unique[bx].point_no] - d_common.sSize; // work ei_new = tx; while(ei_new < d_common.in2_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in2_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_rows == 0){ row = d_common.in2_rows - 1; col = col-1; } // figure out corresponding location in old matrix and copy values to new matrix ori_row = row + in2_rowlow - 1; ori_col = col + in2_collow - 1; d_unique[bx].d_in2[ei_new] = d_common_change.d_frame[ori_col*d_common.frame_rows+ori_row]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //====================================================================================================================================================== // SYNCHRONIZE THREADS //====================================================================================================================================================== __syncthreads(); //====================================================================================================================================================== // CONVOLUTION //====================================================================================================================================================== //==================================================================================================== // ROTATION //==================================================================================================== // variables d_in = &d_unique[bx].d_T[d_unique[bx].in_pointer]; // work ei_new = tx; while(ei_new < d_common.in_elem){ // figure out row/col location in padded array row = (ei_new+1) % d_common.in_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in_rows == 0){ row = d_common.in_rows - 1; col = col-1; 
} // execution rot_row = (d_common.in_rows-1) - row; rot_col = (d_common.in_rows-1) - col; d_in_mod_temp[ei_new] = d_in[rot_col*d_common.in_rows+rot_row]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // ACTUAL CONVOLUTION //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.conv_elem){ // figure out row/col location in array ic = (ei_new+1) % d_common.conv_rows; // (1-n) jc = (ei_new+1) / d_common.conv_rows + 1; // (1-n) if((ei_new+1) % d_common.conv_rows == 0){ ic = d_common.conv_rows; jc = jc-1; } // j = jc + d_common.joffset; jp1 = j + 1; if(d_common.in2_cols < jp1){ ja1 = jp1 - d_common.in2_cols; } else{ ja1 = 1; } if(d_common.in_cols < j){ ja2 = d_common.in_cols; } else{ ja2 = j; } i = ic + d_common.ioffset; ip1 = i + 1; if(d_common.in2_rows < ip1){ ia1 = ip1 - d_common.in2_rows; } else{ ia1 = 1; } if(d_common.in_rows < i){ ia2 = d_common.in_rows; } else{ ia2 = i; } s = 0; for(ja=ja1; ja<=ja2; ja++){ jb = jp1 - ja; for(ia=ia1; ia<=ia2; ia++){ ib = ip1 - ia; s = s + d_in_mod_temp[d_common.in_rows*(ja-1)+ia-1] * d_unique[bx].d_in2[d_common.in2_rows*(jb-1)+ib-1]; } } //d_unique[bx].d_conv[d_common.conv_rows*(jc-1)+ic-1] = s; d_unique[bx].d_conv[ei_new] = s; // go for second round ei_new = ei_new + NUMBER_THREADS; } //====================================================================================================================================================== // SYNCHRONIZE THREADS //====================================================================================================================================================== __syncthreads(); //====================================================================================================================================================== // CUMULATIVE SUM //====================================================================================================================================================== //==================================================================================================== // PAD ARRAY, VERTICAL CUMULATIVE SUM //==================================================================================================== //================================================== // PADD ARRAY //================================================== // work ei_new = tx; while(ei_new < d_common.in2_pad_cumv_elem){ // figure out row/col location in padded array row = (ei_new+1) % d_common.in2_pad_cumv_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_pad_cumv_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_pad_cumv_rows == 0){ row = d_common.in2_pad_cumv_rows - 1; col = col-1; } // execution if( row > (d_common.in2_pad_add_rows-1) && // do if has numbers in original array row < (d_common.in2_pad_add_rows+d_common.in2_rows) && col > (d_common.in2_pad_add_cols-1) && col < (d_common.in2_pad_add_cols+d_common.in2_cols)){ ori_row = row - d_common.in2_pad_add_rows; ori_col = col - d_common.in2_pad_add_cols; d_unique[bx].d_in2_pad_cumv[ei_new] = d_unique[bx].d_in2[ori_col*d_common.in2_rows+ori_row]; } else{ // do if otherwise 
d_unique[bx].d_in2_pad_cumv[ei_new] = 0; } // go for second round ei_new = ei_new + NUMBER_THREADS; } //================================================== // SYNCHRONIZE THREADS //================================================== __syncthreads(); //================================================== // VERTICAL CUMULATIVE SUM //================================================== //work ei_new = tx; while(ei_new < d_common.in2_pad_cumv_cols){ // figure out column position pos_ori = ei_new*d_common.in2_pad_cumv_rows; // variables sum = 0; // loop through all rows for(position = pos_ori; position < pos_ori+d_common.in2_pad_cumv_rows; position = position + 1){ d_unique[bx].d_in2_pad_cumv[position] = d_unique[bx].d_in2_pad_cumv[position] + sum; sum = d_unique[bx].d_in2_pad_cumv[position]; } // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // SELECTION //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.in2_pad_cumv_sel_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in2_pad_cumv_sel_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_pad_cumv_sel_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_pad_cumv_sel_rows == 0){ row = d_common.in2_pad_cumv_sel_rows - 1; col = col-1; } // figure out corresponding location in old matrix and copy values to new matrix ori_row = row + d_common.in2_pad_cumv_sel_rowlow - 1; ori_col = col + d_common.in2_pad_cumv_sel_collow - 1; d_unique[bx].d_in2_pad_cumv_sel[ei_new] = d_unique[bx].d_in2_pad_cumv[ori_col*d_common.in2_pad_cumv_rows+ori_row]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // SELECTION 2, SUBTRACTION, HORIZONTAL CUMULATIVE SUM //==================================================================================================== //================================================== // SELECTION 2 //================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub_cumh_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in2_sub_cumh_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_sub_cumh_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_sub_cumh_rows == 0){ row = d_common.in2_sub_cumh_rows - 1; col = col-1; } // figure out corresponding location in old matrix and copy values to new matrix ori_row = row + d_common.in2_pad_cumv_sel2_rowlow - 1; ori_col = col + d_common.in2_pad_cumv_sel2_collow - 1; d_unique[bx].d_in2_sub_cumh[ei_new] = d_unique[bx].d_in2_pad_cumv[ori_col*d_common.in2_pad_cumv_rows+ori_row]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //================================================== // SYNCHRONIZE THREADS //================================================== __syncthreads(); 
//================================================== // SUBTRACTION //================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub_cumh_elem){ // subtract d_unique[bx].d_in2_sub_cumh[ei_new] = d_unique[bx].d_in2_pad_cumv_sel[ei_new] - d_unique[bx].d_in2_sub_cumh[ei_new]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //================================================== // SYNCHRONIZE THREADS //================================================== __syncthreads(); //================================================== // HORIZONTAL CUMULATIVE SUM //================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub_cumh_rows){ // figure out row position pos_ori = ei_new; // variables sum = 0; // loop through all rows for(position = pos_ori; position < pos_ori+d_common.in2_sub_cumh_elem; position = position + d_common.in2_sub_cumh_rows){ d_unique[bx].d_in2_sub_cumh[position] = d_unique[bx].d_in2_sub_cumh[position] + sum; sum = d_unique[bx].d_in2_sub_cumh[position]; } // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // SELECTION //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub_cumh_sel_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in2_sub_cumh_sel_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_sub_cumh_sel_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_sub_cumh_sel_rows == 0){ row = d_common.in2_sub_cumh_sel_rows - 1; col = col - 1; } // figure out corresponding location in old matrix and copy values to new matrix ori_row = row + d_common.in2_sub_cumh_sel_rowlow - 1; ori_col = col + d_common.in2_sub_cumh_sel_collow - 1; d_unique[bx].d_in2_sub_cumh_sel[ei_new] = d_unique[bx].d_in2_sub_cumh[ori_col*d_common.in2_sub_cumh_rows+ori_row]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // SELECTION 2, SUBTRACTION //==================================================================================================== //================================================== // SELECTION 2 //================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub2_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in2_sub2_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_sub2_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_sub2_rows == 0){ row = d_common.in2_sub2_rows - 1; col = col-1; } // figure out corresponding location in old matrix and copy values to new matrix ori_row = row + d_common.in2_sub_cumh_sel2_rowlow - 1; ori_col = col + d_common.in2_sub_cumh_sel2_collow - 1; d_unique[bx].d_in2_sub2[ei_new] = d_unique[bx].d_in2_sub_cumh[ori_col*d_common.in2_sub_cumh_rows+ori_row]; // 
go for second round ei_new = ei_new + NUMBER_THREADS; } //================================================== // SYNCHRONIZE THREADS //================================================== __syncthreads(); //================================================== // SUBTRACTION //================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub2_elem){ // subtract d_unique[bx].d_in2_sub2[ei_new] = d_unique[bx].d_in2_sub_cumh_sel[ei_new] - d_unique[bx].d_in2_sub2[ei_new]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //====================================================================================================================================================== // SYNCHRONIZE THREADS //====================================================================================================================================================== __syncthreads(); //====================================================================================================================================================== // CUMULATIVE SUM 2 //====================================================================================================================================================== //==================================================================================================== // MULTIPLICATION //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.in2_sqr_elem){ temp = d_unique[bx].d_in2[ei_new]; d_unique[bx].d_in2_sqr[ei_new] = temp * temp; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // PAD ARRAY, VERTICAL CUMULATIVE SUM //==================================================================================================== //================================================== // PAD ARRAY //================================================== // work ei_new = tx; while(ei_new < d_common.in2_pad_cumv_elem){ // figure out row/col location in padded array row = (ei_new+1) % d_common.in2_pad_cumv_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_pad_cumv_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_pad_cumv_rows == 0){ row = d_common.in2_pad_cumv_rows - 1; col = col-1; } // execution if( row > (d_common.in2_pad_add_rows-1) && // do if has numbers in original array row < (d_common.in2_pad_add_rows+d_common.in2_sqr_rows) && col > (d_common.in2_pad_add_cols-1) && col < (d_common.in2_pad_add_cols+d_common.in2_sqr_cols)){ ori_row = row - d_common.in2_pad_add_rows; ori_col = col - d_common.in2_pad_add_cols; d_unique[bx].d_in2_pad_cumv[ei_new] = d_unique[bx].d_in2_sqr[ori_col*d_common.in2_sqr_rows+ori_row]; } else{ // do if otherwise d_unique[bx].d_in2_pad_cumv[ei_new] = 0; } // go for second round ei_new = ei_new + NUMBER_THREADS; } //================================================== // SYNCHRONIZE THREADS //================================================== __syncthreads(); //================================================== // VERTICAL CUMULATIVE SUM //================================================== //work ei_new = tx; while(ei_new < d_common.in2_pad_cumv_cols){ // figure out column 
position pos_ori = ei_new*d_common.in2_pad_cumv_rows; // variables sum = 0; // loop through all rows for(position = pos_ori; position < pos_ori+d_common.in2_pad_cumv_rows; position = position + 1){ d_unique[bx].d_in2_pad_cumv[position] = d_unique[bx].d_in2_pad_cumv[position] + sum; sum = d_unique[bx].d_in2_pad_cumv[position]; } // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // SELECTION //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.in2_pad_cumv_sel_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in2_pad_cumv_sel_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_pad_cumv_sel_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_pad_cumv_sel_rows == 0){ row = d_common.in2_pad_cumv_sel_rows - 1; col = col-1; } // figure out corresponding location in old matrix and copy values to new matrix ori_row = row + d_common.in2_pad_cumv_sel_rowlow - 1; ori_col = col + d_common.in2_pad_cumv_sel_collow - 1; d_unique[bx].d_in2_pad_cumv_sel[ei_new] = d_unique[bx].d_in2_pad_cumv[ori_col*d_common.in2_pad_cumv_rows+ori_row]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // SELECTION 2, SUBTRACTION, HORIZONTAL CUMULATIVE SUM //==================================================================================================== //================================================== // SELECTION 2 //================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub_cumh_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in2_sub_cumh_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_sub_cumh_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_sub_cumh_rows == 0){ row = d_common.in2_sub_cumh_rows - 1; col = col-1; } // figure out corresponding location in old matrix and copy values to new matrix ori_row = row + d_common.in2_pad_cumv_sel2_rowlow - 1; ori_col = col + d_common.in2_pad_cumv_sel2_collow - 1; d_unique[bx].d_in2_sub_cumh[ei_new] = d_unique[bx].d_in2_pad_cumv[ori_col*d_common.in2_pad_cumv_rows+ori_row]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //================================================== // SYNCHRONIZE THREADS //================================================== __syncthreads(); //================================================== // SUBTRACTION //================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub_cumh_elem){ // subtract d_unique[bx].d_in2_sub_cumh[ei_new] = d_unique[bx].d_in2_pad_cumv_sel[ei_new] - d_unique[bx].d_in2_sub_cumh[ei_new]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //================================================== // HORIZONTAL CUMULATIVE SUM 
//================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub_cumh_rows){ // figure out row position pos_ori = ei_new; // variables sum = 0; // loop through all rows for(position = pos_ori; position < pos_ori+d_common.in2_sub_cumh_elem; position = position + d_common.in2_sub_cumh_rows){ d_unique[bx].d_in2_sub_cumh[position] = d_unique[bx].d_in2_sub_cumh[position] + sum; sum = d_unique[bx].d_in2_sub_cumh[position]; } // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // SELECTION //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub_cumh_sel_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in2_sub_cumh_sel_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_sub_cumh_sel_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_sub_cumh_sel_rows == 0){ row = d_common.in2_sub_cumh_sel_rows - 1; col = col - 1; } // figure out corresponding location in old matrix and copy values to new matrix ori_row = row + d_common.in2_sub_cumh_sel_rowlow - 1; ori_col = col + d_common.in2_sub_cumh_sel_collow - 1; d_unique[bx].d_in2_sub_cumh_sel[ei_new] = d_unique[bx].d_in2_sub_cumh[ori_col*d_common.in2_sub_cumh_rows+ori_row]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // SELECTION 2, SUBTRACTION //==================================================================================================== //================================================== // SELECTION 2 //================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub2_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in2_sub2_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_sub2_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_sub2_rows == 0){ row = d_common.in2_sub2_rows - 1; col = col-1; } // figure out corresponding location in old matrix and copy values to new matrix ori_row = row + d_common.in2_sub_cumh_sel2_rowlow - 1; ori_col = col + d_common.in2_sub_cumh_sel2_collow - 1; d_unique[bx].d_in2_sqr_sub2[ei_new] = d_unique[bx].d_in2_sub_cumh[ori_col*d_common.in2_sub_cumh_rows+ori_row]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //================================================== // SYNCHRONIZE THREADS //================================================== __syncthreads(); //================================================== // SUBTRACTION //================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub2_elem){ // subtract d_unique[bx].d_in2_sqr_sub2[ei_new] = d_unique[bx].d_in2_sub_cumh_sel[ei_new] - d_unique[bx].d_in2_sqr_sub2[ei_new]; // go for second round ei_new = ei_new + NUMBER_THREADS; } 
//====================================================================================================================================================== // SYNCHRONIZE THREADS //====================================================================================================================================================== __syncthreads(); //====================================================================================================================================================== // FINAL //====================================================================================================================================================== //==================================================================================================== // DENOMINATOR A SAVE RESULT IN CUMULATIVE SUM A2 //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub2_elem){ temp = d_unique[bx].d_in2_sub2[ei_new]; temp2 = d_unique[bx].d_in2_sqr_sub2[ei_new] - (temp * temp / d_common.in_elem); if(temp2 < 0){ temp2 = 0; } d_unique[bx].d_in2_sqr_sub2[ei_new] = sqrt(temp2); // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // MULTIPLICATION //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.in_sqr_elem){ temp = d_in[ei_new]; d_unique[bx].d_in_sqr[ei_new] = temp * temp; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // IN SUM //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.in_cols){ sum = 0; for(i = 0; i < d_common.in_rows; i++){ sum = sum + d_in[ei_new*d_common.in_rows+i]; } in_partial_sum[ei_new] = sum; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // IN_SQR SUM //==================================================================================================== ei_new = tx; while(ei_new < d_common.in_sqr_rows){ sum = 0; for(i = 0; i < d_common.in_sqr_cols; i++){ sum = sum + d_unique[bx].d_in_sqr[ei_new+d_common.in_sqr_rows*i]; } in_sqr_partial_sum[ei_new] = sum; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== 
__syncthreads(); //==================================================================================================== // FINAL SUMMATION //==================================================================================================== if(tx == 0){ in_final_sum = 0; for(i = 0; i<d_common.in_cols; i++){ in_final_sum = in_final_sum + in_partial_sum[i]; } }else if(tx == 1){ in_sqr_final_sum = 0; for(i = 0; i<d_common.in_sqr_cols; i++){ in_sqr_final_sum = in_sqr_final_sum + in_sqr_partial_sum[i]; } } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // DENOMINATOR T //==================================================================================================== if(tx == 0){ mean = in_final_sum / d_common.in_elem; // gets mean (average) value of element in ROI mean_sqr = mean * mean; variance = (in_sqr_final_sum / d_common.in_elem) - mean_sqr; // gets variance of ROI deviation = sqrt(variance); // gets standard deviation of ROI denomT = sqrt(float(d_common.in_elem-1))*deviation; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // DENOMINATOR SAVE RESULT IN CUMULATIVE SUM A2 //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub2_elem){ d_unique[bx].d_in2_sqr_sub2[ei_new] = d_unique[bx].d_in2_sqr_sub2[ei_new] * denomT; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // NUMERATOR SAVE RESULT IN CONVOLUTION //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.conv_elem){ d_unique[bx].d_conv[ei_new] = d_unique[bx].d_conv[ei_new] - d_unique[bx].d_in2_sub2[ei_new] * in_final_sum / d_common.in_elem; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // CORRELATION SAVE RESULT IN CUMULATIVE SUM A2 //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub2_elem){ d_unique[bx].d_in2_sqr_sub2[ei_new] = d_unique[bx].d_conv[ei_new] / d_unique[bx].d_in2_sqr_sub2[ei_new]; // go for second round ei_new = ei_new + NUMBER_THREADS; } 
//====================================================================================================================================================== // SYNCHRONIZE THREADS //====================================================================================================================================================== __syncthreads(); //====================================================================================================================================================== // TEMPLATE MASK CREATE //====================================================================================================================================================== cent = d_common.sSize + d_common.tSize + 1; if(d_common_change.frame_no == 0){ tMask_row = cent + d_unique[bx].d_Row[d_unique[bx].point_no] - d_unique[bx].d_Row[d_unique[bx].point_no] - 1; tMask_col = cent + d_unique[bx].d_Col[d_unique[bx].point_no] - d_unique[bx].d_Col[d_unique[bx].point_no] - 1; } else{ pointer = d_common_change.frame_no-1+d_unique[bx].point_no*d_common.no_frames; tMask_row = cent + d_unique[bx].d_tRowLoc[pointer] - d_unique[bx].d_Row[d_unique[bx].point_no] - 1; tMask_col = cent + d_unique[bx].d_tColLoc[pointer] - d_unique[bx].d_Col[d_unique[bx].point_no] - 1; } //work ei_new = tx; while(ei_new < d_common.tMask_elem){ location = tMask_col*d_common.tMask_rows + tMask_row; if(ei_new==location){ d_unique[bx].d_tMask[ei_new] = 1; } else{ d_unique[bx].d_tMask[ei_new] = 0; } //go for second round ei_new = ei_new + NUMBER_THREADS; } //====================================================================================================================================================== // SYNCHRONIZE THREADS //====================================================================================================================================================== __syncthreads(); //====================================================================================================================================================== // MASK CONVOLUTION //====================================================================================================================================================== // work ei_new = tx; while(ei_new < d_common.mask_conv_elem){ // figure out row/col location in array ic = (ei_new+1) % d_common.mask_conv_rows; // (1-n) jc = (ei_new+1) / d_common.mask_conv_rows + 1; // (1-n) if((ei_new+1) % d_common.mask_conv_rows == 0){ ic = d_common.mask_conv_rows; jc = jc-1; } // j = jc + d_common.mask_conv_joffset; jp1 = j + 1; if(d_common.mask_cols < jp1){ ja1 = jp1 - d_common.mask_cols; } else{ ja1 = 1; } if(d_common.tMask_cols < j){ ja2 = d_common.tMask_cols; } else{ ja2 = j; } i = ic + d_common.mask_conv_ioffset; ip1 = i + 1; if(d_common.mask_rows < ip1){ ia1 = ip1 - d_common.mask_rows; } else{ ia1 = 1; } if(d_common.tMask_rows < i){ ia2 = d_common.tMask_rows; } else{ ia2 = i; } s = 0; for(ja=ja1; ja<=ja2; ja++){ jb = jp1 - ja; for(ia=ia1; ia<=ia2; ia++){ ib = ip1 - ia; s = s + d_unique[bx].d_tMask[d_common.tMask_rows*(ja-1)+ia-1] * 1; } } // //d_unique[bx].d_mask_conv[d_common.mask_conv_rows*(jc-1)+ic-1] = s; d_unique[bx].d_mask_conv[ei_new] = d_unique[bx].d_in2_sqr_sub2[ei_new] * s; // go for second round ei_new = ei_new + NUMBER_THREADS; } //====================================================================================================================================================== // SYNCHRONIZE THREADS 
//====================================================================================================================================================== __syncthreads(); //====================================================================================================================================================== // MAXIMUM VALUE //====================================================================================================================================================== //==================================================================================================== // INITIAL SEARCH //==================================================================================================== ei_new = tx; while(ei_new < d_common.mask_conv_rows){ for(i=0; i<d_common.mask_conv_cols; i++){ largest_coordinate_current = ei_new*d_common.mask_conv_rows+i; largest_value_current = abs(d_unique[bx].d_mask_conv[largest_coordinate_current]); if(largest_value_current > largest_value){ largest_coordinate = largest_coordinate_current; largest_value = largest_value_current; } } par_max_coo[ei_new] = largest_coordinate; par_max_val[ei_new] = largest_value; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // FINAL SEARCH //==================================================================================================== if(tx == 0){ for(i = 0; i < d_common.mask_conv_rows; i++){ if(par_max_val[i] > fin_max_val){ fin_max_val = par_max_val[i]; fin_max_coo = par_max_coo[i]; } } // convert coordinate to row/col form largest_row = (fin_max_coo+1) % d_common.mask_conv_rows - 1; // (0-n) row largest_col = (fin_max_coo+1) / d_common.mask_conv_rows; // (0-n) column if((fin_max_coo+1) % d_common.mask_conv_rows == 0){ largest_row = d_common.mask_conv_rows - 1; largest_col = largest_col - 1; } // calculate offset largest_row = largest_row + 1; // compensate to match MATLAB format (1-n) largest_col = largest_col + 1; // compensate to match MATLAB format (1-n) offset_row = largest_row - d_common.in_rows - (d_common.sSize - d_common.tSize); offset_col = largest_col - d_common.in_cols - (d_common.sSize - d_common.tSize); pointer = d_common_change.frame_no+d_unique[bx].point_no*d_common.no_frames; d_unique[bx].d_tRowLoc[pointer] = d_unique[bx].d_Row[d_unique[bx].point_no] + offset_row; d_unique[bx].d_tColLoc[pointer] = d_unique[bx].d_Col[d_unique[bx].point_no] + offset_col; } //====================================================================================================================================================== // SYNCHRONIZE THREADS //====================================================================================================================================================== __syncthreads(); } //=============================================================================================================================================================================================================== //=============================================================================================================================================================================================================== // COORDINATE AND TEMPLATE 
UPDATE //=============================================================================================================================================================================================================== //=============================================================================================================================================================================================================== // time19 = clock(); // if the last frame in the bath, update template if(d_common_change.frame_no != 0 && (d_common_change.frame_no)%10 == 0){ // update coordinate loc_pointer = d_unique[bx].point_no*d_common.no_frames+d_common_change.frame_no; d_unique[bx].d_Row[d_unique[bx].point_no] = d_unique[bx].d_tRowLoc[loc_pointer]; d_unique[bx].d_Col[d_unique[bx].point_no] = d_unique[bx].d_tColLoc[loc_pointer]; // work ei_new = tx; while(ei_new < d_common.in_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in_rows == 0){ row = d_common.in_rows - 1; col = col-1; } // figure out row/col location in corresponding new template area in image and give to every thread (get top left corner and progress down and right) ori_row = d_unique[bx].d_Row[d_unique[bx].point_no] - 25 + row - 1; ori_col = d_unique[bx].d_Col[d_unique[bx].point_no] - 25 + col - 1; ori_pointer = ori_col*d_common.frame_rows+ori_row; // update template d_in[ei_new] = d_common.alpha*d_in[ei_new] + (1.00-d_common.alpha)*d_common_change.d_frame[ori_pointer]; // go for second round ei_new = ei_new + NUMBER_THREADS; } } } //=============================================================================================================================================================================================================== //=============================================================================================================================================================================================================== // END OF FUNCTION //=============================================================================================================================================================================================================== //===============================================================================================================================================================================================================
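The kernel above distributes every stage of its work (selection, convolution, cumulative sums, template update) with the same block-strided loop over a flattened column-major buffer. A minimal sketch of that pattern, with placeholder names (strided_copy, num_threads, n_rows, n_elem) that do not appear in the original file:

// Block-strided work loop: each thread starts at its thread index and hops
// by the block size, recovering (row, col) from the linear element index of
// a column-major array. Mirrors the ei_new / NUMBER_THREADS pattern above.
__device__ void strided_copy(const float* src, float* dst,
                             int n_rows, int n_elem,
                             int tx, int num_threads)
{
    int ei = tx;
    while (ei < n_elem) {
        int row = ei % n_rows;            // 0-based row
        int col = ei / n_rows;            // 0-based column
        dst[col * n_rows + row] = src[col * n_rows + row];
        ei = ei + num_threads;            // next element handled by this thread
    }
}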
50120ad9c450833118407bde11b9b009cdea479c.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <cmath> #include <utility> #include <cstdlib> #include <thrust/device_vector.h> #include <thrust/reduce.h> #include <thrust/functional.h> #include <boost/numeric/odeint.hpp> #include <boost/numeric/odeint/external/thrust/thrust_algebra.hpp> #include <boost/numeric/odeint/external/thrust/thrust_operations.hpp> #include <boost/numeric/odeint/external/thrust/thrust_resize.hpp> using namespace std; using namespace boost::numeric::odeint; typedef double value_type; typedef thrust::device_vector< value_type > state_type; const value_type sigma = 10.0; const value_type b = 8.0 / 3.0; struct lorenz_system { struct lorenz_functor { template< class T > __host__ __device__ void operator()( T t ) const { value_type R = thrust::get< 3 >( t ); value_type x = thrust::get< 0 >( t ); value_type y = thrust::get< 1 >( t ); value_type z = thrust::get< 2 >( t ); thrust::get< 4 >( t ) = sigma * ( y - x ); thrust::get< 5 >( t ) = R * x - y - x * z; thrust::get< 6 >( t ) = -b * z + x * y ; } }; lorenz_system( size_t N , const state_type &beta ) : m_N( N ) , m_beta( beta ) { } template< class State , class Deriv > void operator()( const State &x , Deriv &dxdt , value_type t ) const { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( boost::begin( x ) , boost::begin( x ) + m_N , boost::begin( x ) + 2 * m_N , m_beta.begin() , boost::begin( dxdt ) , boost::begin( dxdt ) + m_N , boost::begin( dxdt ) + 2 * m_N ) ) , thrust::make_zip_iterator( thrust::make_tuple( boost::begin( x ) + m_N , boost::begin( x ) + 2 * m_N , boost::begin( x ) + 3 * m_N , m_beta.begin() , boost::begin( dxdt ) + m_N , boost::begin( dxdt ) + 2 * m_N , boost::begin( dxdt ) + 3 * m_N ) ) , lorenz_functor() ); } size_t m_N; const state_type &m_beta; }; size_t N; const value_type dt = 0.01; const value_type t_max = 100.0; int main( int argc , char* argv[] ) { // int driver_version , runtime_version; // hipDriverGetVersion( &driver_version ); // hipRuntimeGetVersion ( &runtime_version ); // cout << driver_version << "\t" << runtime_version << endl; N = argc > 1 ? atoi(argv[1]) : 1024; vector< value_type > beta_host( N ); const value_type beta_min = value_type(0.1) , beta_max = value_type(50.0); for( size_t i=0 ; i<N ; ++i ) beta_host[i] = beta_min + value_type( i ) * ( beta_max - beta_min ) / value_type( N - 1 ); state_type beta = beta_host; //[ thrust_lorenz_parameters_integration state_type x( 3 * N ); // initialize x,y,z thrust::fill( x.begin() , x.end() , value_type(10.0) ); typedef runge_kutta4< state_type , value_type , state_type , value_type , thrust_algebra , thrust_operations > stepper_type; lorenz_system lorenz( N , beta ); integrate_const( stepper_type() , lorenz , x , value_type(0.0) , t_max , dt ); thrust::host_vector< value_type > res = x; // for( size_t i=0 ; i<N ; ++i ) cout << res[i] << "\t" << beta_host[i] << "\n"; cout << res[0] << endl; return 0; }
50120ad9c450833118407bde11b9b009cdea479c.cu
#include <iostream> #include <cmath> #include <utility> #include <cstdlib> #include <thrust/device_vector.h> #include <thrust/reduce.h> #include <thrust/functional.h> #include <boost/numeric/odeint.hpp> #include <boost/numeric/odeint/external/thrust/thrust_algebra.hpp> #include <boost/numeric/odeint/external/thrust/thrust_operations.hpp> #include <boost/numeric/odeint/external/thrust/thrust_resize.hpp> using namespace std; using namespace boost::numeric::odeint; typedef double value_type; typedef thrust::device_vector< value_type > state_type; const value_type sigma = 10.0; const value_type b = 8.0 / 3.0; struct lorenz_system { struct lorenz_functor { template< class T > __host__ __device__ void operator()( T t ) const { value_type R = thrust::get< 3 >( t ); value_type x = thrust::get< 0 >( t ); value_type y = thrust::get< 1 >( t ); value_type z = thrust::get< 2 >( t ); thrust::get< 4 >( t ) = sigma * ( y - x ); thrust::get< 5 >( t ) = R * x - y - x * z; thrust::get< 6 >( t ) = -b * z + x * y ; } }; lorenz_system( size_t N , const state_type &beta ) : m_N( N ) , m_beta( beta ) { } template< class State , class Deriv > void operator()( const State &x , Deriv &dxdt , value_type t ) const { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( boost::begin( x ) , boost::begin( x ) + m_N , boost::begin( x ) + 2 * m_N , m_beta.begin() , boost::begin( dxdt ) , boost::begin( dxdt ) + m_N , boost::begin( dxdt ) + 2 * m_N ) ) , thrust::make_zip_iterator( thrust::make_tuple( boost::begin( x ) + m_N , boost::begin( x ) + 2 * m_N , boost::begin( x ) + 3 * m_N , m_beta.begin() , boost::begin( dxdt ) + m_N , boost::begin( dxdt ) + 2 * m_N , boost::begin( dxdt ) + 3 * m_N ) ) , lorenz_functor() ); } size_t m_N; const state_type &m_beta; }; size_t N; const value_type dt = 0.01; const value_type t_max = 100.0; int main( int argc , char* argv[] ) { // int driver_version , runtime_version; // cudaDriverGetVersion( &driver_version ); // cudaRuntimeGetVersion ( &runtime_version ); // cout << driver_version << "\t" << runtime_version << endl; N = argc > 1 ? atoi(argv[1]) : 1024; vector< value_type > beta_host( N ); const value_type beta_min = value_type(0.1) , beta_max = value_type(50.0); for( size_t i=0 ; i<N ; ++i ) beta_host[i] = beta_min + value_type( i ) * ( beta_max - beta_min ) / value_type( N - 1 ); state_type beta = beta_host; //[ thrust_lorenz_parameters_integration state_type x( 3 * N ); // initialize x,y,z thrust::fill( x.begin() , x.end() , value_type(10.0) ); typedef runge_kutta4< state_type , value_type , state_type , value_type , thrust_algebra , thrust_operations > stepper_type; lorenz_system lorenz( N , beta ); integrate_const( stepper_type() , lorenz , x , value_type(0.0) , t_max , dt ); thrust::host_vector< value_type > res = x; // for( size_t i=0 ; i<N ; ++i ) cout << res[i] << "\t" << beta_host[i] << "\n"; cout << res[0] << endl; return 0; }
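Both versions of this row drive boost::odeint with one fused state vector whose x, y and z slices are walked in lockstep through a thrust zip iterator. A reduced sketch of that zip-iterator idiom, independent of odeint; the functor and function names (sum3, zip_demo) are illustrative, not taken from the file above:

#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/tuple.h>

// Combine three equally sized slices of one flat device vector element-wise.
struct sum3
{
    template< class Tuple >
    __host__ __device__ void operator()( Tuple t ) const
    {
        // out = x + y + z for the zipped element
        thrust::get< 3 >( t ) = thrust::get< 0 >( t ) + thrust::get< 1 >( t ) + thrust::get< 2 >( t );
    }
};

void zip_demo( size_t n )
{
    thrust::device_vector< double > state( 3 * n , 1.0 );   // [ x | y | z ]
    thrust::device_vector< double > out( n );
    thrust::for_each(
        thrust::make_zip_iterator( thrust::make_tuple(
            state.begin() , state.begin() + n , state.begin() + 2 * n , out.begin() ) ) ,
        thrust::make_zip_iterator( thrust::make_tuple(
            state.begin() + n , state.begin() + 2 * n , state.begin() + 3 * n , out.end() ) ) ,
        sum3() );
}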
4eb0ef0127a439e6733865655c4642d2973359c9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @generated from magmablas/zsymmetrize_tiles.cu, normal z -> s, Wed Jan 2 14:18:51 2019 @author Mark Gates */ #include "magma_internal.h" #define NB 64 /* Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix. Grid is ceil(m/NB) x ntile. Each tile is m x m, and is divided into block rows, each NB x m. Each block has NB threads. Each thread copies one row, iterating across all columns below diagonal. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void ssymmetrize_tiles_lower( int m, float *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.y*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; float *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; float *dAend = dA + i*ldda; // end at diagonal dA(i,i) while( dA < dAend ) { *dAT = MAGMA_S_CONJ(*dA); // upper := lower dA += ldda; dAT += 1; } *dA = MAGMA_S_MAKE( MAGMA_S_REAL(*dA), 0 ); // make diagonal real } } // only difference with _lower version is direction dA=dAT instead of dAT=dA. __global__ void ssymmetrize_tiles_upper( int m, float *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.y*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; float *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; float *dAend = dA + i*ldda; // end at diagonal dA(i,i) while( dA < dAend ) { *dA = MAGMA_S_CONJ(*dAT); // lower := upper dA += ldda; dAT += 1; } *dA = MAGMA_S_MAKE( MAGMA_S_REAL(*dA), 0 ); // make diagonal real } } /***************************************************************************//** Purpose ------- SSYMMETRIZE_TILES copies lower triangle to upper triangle, or vice-versa, to make some blocks of dA into general representations of a symmetric block. In Complex, it sets the diagonal to be Real. This processes NTILE blocks, typically the diagonal blocks. Each block is offset by mstride rows and nstride columns from the previous block. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA that is valid on input. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] m INTEGER The number of rows & columns of each square block of dA. M >= 0. @param[in,out] dA REAL array, dimension (LDDA,N) The matrix dA. N = m + nstride*(ntile-1). @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1, m + mstride*(ntile-1)). @param[in] ntile INTEGER Number of blocks to symmetrize. ntile >= 0. @param[in] mstride INTEGER Row offset from start of one block to start of next block. mstride >= 0. Either (mstride >= m) or (nstride >= m), to prevent m-by-m tiles from overlapping. @param[in] nstride INTEGER Column offset from start of one block to start of next block. nstride >= 0. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_symmetrize_batched *******************************************************************************/ extern "C" void magmablas_ssymmetrize_tiles( magma_uplo_t uplo, magma_int_t m, magmaFloat_ptr dA, magma_int_t ldda, magma_int_t ntile, magma_int_t mstride, magma_int_t nstride, magma_queue_t queue ) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) info = -1; else if ( m < 0 ) info = -2; else if ( ldda < max(1,m + mstride*(ntile-1)) ) info = -5; else if ( ntile < 0 ) info = -6; else if ( mstride < 0 ) info = -7; else if ( nstride < 0 ) info = -8; else if ( mstride < m && nstride < m ) // only one must be >= m. info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || ntile == 0 ) return; dim3 threads( NB, 1 ); dim3 grid( magma_ceildiv( m, NB ), ntile ); //printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x ); if ( uplo == MagmaUpper ) { hipLaunchKernelGGL(( ssymmetrize_tiles_upper) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dA, ldda, mstride, nstride ); } else { hipLaunchKernelGGL(( ssymmetrize_tiles_lower) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dA, ldda, mstride, nstride ); } }
4eb0ef0127a439e6733865655c4642d2973359c9.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @generated from magmablas/zsymmetrize_tiles.cu, normal z -> s, Wed Jan 2 14:18:51 2019 @author Mark Gates */ #include "magma_internal.h" #define NB 64 /* Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix. Grid is ceil(m/NB) x ntile. Each tile is m x m, and is divided into block rows, each NB x m. Each block has NB threads. Each thread copies one row, iterating across all columns below diagonal. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void ssymmetrize_tiles_lower( int m, float *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.y*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; float *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; float *dAend = dA + i*ldda; // end at diagonal dA(i,i) while( dA < dAend ) { *dAT = MAGMA_S_CONJ(*dA); // upper := lower dA += ldda; dAT += 1; } *dA = MAGMA_S_MAKE( MAGMA_S_REAL(*dA), 0 ); // make diagonal real } } // only difference with _lower version is direction dA=dAT instead of dAT=dA. __global__ void ssymmetrize_tiles_upper( int m, float *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.y*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; float *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; float *dAend = dA + i*ldda; // end at diagonal dA(i,i) while( dA < dAend ) { *dA = MAGMA_S_CONJ(*dAT); // lower := upper dA += ldda; dAT += 1; } *dA = MAGMA_S_MAKE( MAGMA_S_REAL(*dA), 0 ); // make diagonal real } } /***************************************************************************//** Purpose ------- SSYMMETRIZE_TILES copies lower triangle to upper triangle, or vice-versa, to make some blocks of dA into general representations of a symmetric block. In Complex, it sets the diagonal to be Real. This processes NTILE blocks, typically the diagonal blocks. Each block is offset by mstride rows and nstride columns from the previous block. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA that is valid on input. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] m INTEGER The number of rows & columns of each square block of dA. M >= 0. @param[in,out] dA REAL array, dimension (LDDA,N) The matrix dA. N = m + nstride*(ntile-1). @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1, m + mstride*(ntile-1)). @param[in] ntile INTEGER Number of blocks to symmetrize. ntile >= 0. @param[in] mstride INTEGER Row offset from start of one block to start of next block. mstride >= 0. Either (mstride >= m) or (nstride >= m), to prevent m-by-m tiles from overlapping. @param[in] nstride INTEGER Column offset from start of one block to start of next block. nstride >= 0. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_symmetrize_batched *******************************************************************************/ extern "C" void magmablas_ssymmetrize_tiles( magma_uplo_t uplo, magma_int_t m, magmaFloat_ptr dA, magma_int_t ldda, magma_int_t ntile, magma_int_t mstride, magma_int_t nstride, magma_queue_t queue ) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) info = -1; else if ( m < 0 ) info = -2; else if ( ldda < max(1,m + mstride*(ntile-1)) ) info = -5; else if ( ntile < 0 ) info = -6; else if ( mstride < 0 ) info = -7; else if ( nstride < 0 ) info = -8; else if ( mstride < m && nstride < m ) // only one must be >= m. info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || ntile == 0 ) return; dim3 threads( NB, 1 ); dim3 grid( magma_ceildiv( m, NB ), ntile ); //printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x ); if ( uplo == MagmaUpper ) { ssymmetrize_tiles_upper <<< grid, threads, 0, queue->cuda_stream() >>> ( m, dA, ldda, mstride, nstride ); } else { ssymmetrize_tiles_lower <<< grid, threads, 0, queue->cuda_stream() >>> ( m, dA, ldda, mstride, nstride ); } }
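The routine documents its arguments in detail but no call site appears in this row. A hedged usage sketch, assuming a standard MAGMA setup (magma_init already called, a valid queue, and a device matrix whose lower triangles hold valid data); symmetrize_diagonal_blocks is an illustrative wrapper name, not part of MAGMA:

#include "magma_v2.h"

// Symmetrize the ntile m-by-m blocks marching down the main diagonal of a
// device matrix dA with leading dimension ldda, copying lower to upper.
// Both strides are set to m, which satisfies the mstride>=m / nstride>=m
// non-overlap requirement documented above.
void symmetrize_diagonal_blocks( magmaFloat_ptr dA, magma_int_t ldda,
                                 magma_int_t m, magma_int_t ntile,
                                 magma_queue_t queue )
{
    magmablas_ssymmetrize_tiles( MagmaLower, m, dA, ldda,
                                 ntile, /*mstride=*/ m, /*nstride=*/ m, queue );
    magma_queue_sync( queue );   // wait for the kernel before reusing dA
}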
6f171be76d53ad243013ddd0745a4a3264632043.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

__global__ void d(int *a, int N)
{
  int idx = threadIdx.x + blockIdx.x * blockDim.x;
  int stride = blockDim.x * gridDim.x;

  for (int i = idx; i < N; i += stride)
  {
    a[i] = 1;
  }
}

void h(int *a, int N)
{
  for (int i = 0; i < N; ++i)
  {
    a[i] = 1;
  }
}

int main()
{
  int N = 2<<24;
  size_t size = N * sizeof(int);

  int *a;
  hipMallocManaged(&a, size);

  hipLaunchKernelGGL(d, dim3(256), dim3(256), 0, 0, a, N);

  hipFree(a);
}
6f171be76d53ad243013ddd0745a4a3264632043.cu
__global__ void d(int *a, int N)
{
  int idx = threadIdx.x + blockIdx.x * blockDim.x;
  int stride = blockDim.x * gridDim.x;

  for (int i = idx; i < N; i += stride)
  {
    a[i] = 1;
  }
}

void h(int *a, int N)
{
  for (int i = 0; i < N; ++i)
  {
    a[i] = 1;
  }
}

int main()
{
  int N = 2<<24;
  size_t size = N * sizeof(int);

  int *a;
  cudaMallocManaged(&a, size);

  d<<<256, 256>>>(a, N);

  cudaFree(a);
}
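Both files in this row launch the grid-stride kernel with a fixed 256x256 configuration and free the managed buffer immediately. A small sketch of the more common variant, sizing the grid from N and synchronizing before the host touches the managed allocation; d_init, run and the block size of 256 are illustrative choices, not part of the pair above:

#include <cuda_runtime.h>

// Grid-stride initialization kernel, same body as `d` above.
__global__ void d_init(int *a, int N)
{
  int idx = threadIdx.x + blockIdx.x * blockDim.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = idx; i < N; i += stride) a[i] = 1;
}

int run(int N)
{
  int *a;
  cudaMallocManaged(&a, N * sizeof(int));

  int threads = 256;
  int blocks = (N + threads - 1) / threads;   // enough blocks to cover N
  d_init<<<blocks, threads>>>(a, N);

  cudaDeviceSynchronize();                    // wait before host access / free
  int first = a[0];                           // host read of managed memory

  cudaFree(a);
  return first;
}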
81c136c5c9381cccdd1a52911c52432c0b120edd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "types.cuh" #include "wh.cuh" #include "convert.h" #include "util.cuh" namespace sr { namespace wh { using namespace sr::data; const size_t MAXKEP = 5; const float64_t TOLKEP = 1E-14; struct MVSKernel { const float64_t* planet_m; const uint32_t* planet_id; const float64_t mu; const f64_3* planet_h0_log; const f64_3* planet_r_log; const uint32_t planet_n; const uint32_t tbsize; const float64_t dt; const float64_t outer_r; const float64_t* planet_rh; MVSKernel(const DevicePlanetPhaseSpace& planets, const Dvf64_3& h0_log, const Dvf64& _planet_rh, double _outer_r, uint32_t _tbsize, float64_t _dt) : planet_m(planets.m.data().get()), planet_id(planets.id.data().get()), mu(planets.m[0]), planet_h0_log(h0_log.data().get()), planet_r_log(planets.r_log.data().get()), planet_n(static_cast<uint32_t>(planets.n_alive)), tbsize(_tbsize), dt(_dt), outer_r(_outer_r), planet_rh(_planet_rh.data().get()) { } __host__ __device__ static void kepeq(double dM, double ecosEo, double esinEo, double* dE, double* sindE, double* cosdE, uint16_t& flags) { double f, fp, delta; *sindE = sin(*dE); *cosdE = cos(*dE); for (size_t i = 0; i < MAXKEP; i++) { f = *dE - ecosEo * (*sindE) + esinEo * (1. - *cosdE) - dM; fp = 1. - ecosEo * (*cosdE) + esinEo * (*sindE); delta = -f / fp; *dE += delta; *sindE = sin(*dE); *cosdE = cos(*dE); #ifdef CUDA_KEPEQ_CHECK_CONVERGENCE if (fabs(delta) < TOLKEP) { goto done; } #endif } flags = static_cast<uint16_t>((fabs(delta) > TOLKEP) << 2); #ifdef CUDA_KEPEQ_CHECK_CONVERGENCE done: ; #endif } __host__ __device__ static void drift(f64_3& r, f64_3& v, uint16_t& flags, double dt, double mu) { float64_t dist = sqrt(r.lensq()); float64_t vdotr = v.x * r.x + v.y * r.y + v.z * r.z; float64_t energy = v.lensq() * 0.5 - mu / dist; flags = static_cast<uint16_t>((energy >= 0) << 3); float64_t a = -0.5 * mu / energy; float64_t n_ = sqrt(mu / (a * a * a)); float64_t ecosEo = 1.0 - dist / a; float64_t esinEo = vdotr / (n_ * a * a); float64_t e = sqrt(ecosEo * ecosEo + esinEo * esinEo); // subtract off an integer multiple of complete orbits float64_t dM = dt * n_ - M_2PI * (int) (dt * n_ / M_2PI); // remaining time to advance float64_t _dt = dM / n_; // call kepler equation solver with initial guess in dE already float64_t dE = dM - esinEo + esinEo * cos(dM) + ecosEo * sin(dM); float64_t sindE, cosdE; kepeq(dM, ecosEo, esinEo, &dE, &sindE, &cosdE, flags); float64_t fp = 1.0 - ecosEo * cosdE + esinEo * sindE; float64_t f = 1.0 + a * (cosdE - 1.0) / dist; float64_t g = _dt + (sindE - dE) / n_; float64_t fdot = -n_ * sindE * a / (dist * fp); float64_t gdot = 1.0 + (cosdE - 1.0) / fp; f64_3 r0 = r; r = r0 * f + v * g; v = r0 * fdot + v * gdot; } __host__ __device__ static void step_forward( f64_3& r, f64_3& v, uint16_t& flags, f64_3& a, uint32_t& deathtime_index, uint32_t _tbsize, uint32_t planet_n, const f64_3* h0_log, const f64_3* r_log, const float64_t* m, const uint32_t* pl_id, const float64_t* rh, double outer_radius, float64_t dt, float64_t mu) { deathtime_index = 0; for (uint32_t step = 0; step < static_cast<uint32_t>(_tbsize); step++) { if (flags == 0) { // kick // if step = 0, the acceleration is preloaded - this comes from v = v + a * (dt / 2); drift(r, v, flags, dt, mu); a = h0_log[step]; // planet 0 is not counted for (uint32_t i = 1; i < static_cast<uint32_t>(planet_n); i++) { f64_3 dr = r - r_log[step * (planet_n - 1) + i - 1]; float64_t rad = dr.lensq(); if (rad < rh[i] * rh[i] && flags == 0) { flags 
= static_cast<uint16_t>((pl_id[i] << 8) | 0x0001); } float64_t inv3 = 1. / (rad * sqrt(rad)); float64_t fac = m[i] * inv3; a -= dr * fac; } float64_t rad = r.lensq(); if (rad < rh[0] * rh[0]) { flags = 0x0001; } if (rad > outer_radius * outer_radius) { flags = 0x0002; } v = v + a * (dt / 2); deathtime_index = step + 1; } } } // manual diagram [5] template<typename Tuple> __host__ __device__ void operator()(Tuple args) const { uint32_t _tbsize = this->tbsize; const f64_3* h0_log = this->planet_h0_log; const f64_3* r_log = this->planet_r_log; const float64_t* m = this->planet_m; const uint32_t* ids = this->planet_id; const float64_t* rh = this->planet_rh; f64_3 r = thrust::get<0>(thrust::get<0>(args)); f64_3 v = thrust::get<1>(thrust::get<0>(args)); uint16_t flags = thrust::get<2>(thrust::get<0>(args)); uint32_t deathtime_index = 0; f64_3 a = thrust::get<1>(args); step_forward(r, v, flags, a, deathtime_index, _tbsize, planet_n, h0_log, r_log, m, ids, rh, this->outer_r, this->dt, this->mu); thrust::get<0>(thrust::get<0>(args)) = r; thrust::get<1>(thrust::get<0>(args)) = v; thrust::get<2>(thrust::get<0>(args)) = flags; thrust::get<3>(thrust::get<0>(args)) = deathtime_index; thrust::get<1>(args) = a; } }; __global__ void MVSKernel_( f64_3* r, f64_3* v, uint16_t* flags, f64_3* a, uint32_t* deathtime_index, uint32_t n, uint32_t tbsize, uint32_t planet_n, const f64_3* h0_log, const f64_3* r_log, const float64_t* m, const uint32_t* pl_id, const float64_t* rh, double outer_r, float64_t dt, float64_t mu) { // max timeblock size: 384 __shared__ f64_3 h0_log_shared[384]; __shared__ f64_3 r_log_shared[1536]; for (int i = threadIdx.x; i < tbsize; i += blockDim.x) { h0_log_shared[i] = h0_log[i]; } for (int i = threadIdx.x; i < tbsize * (planet_n - 1); i += blockDim.x) { r_log_shared[i] = r_log[i]; } __syncthreads(); for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { f64_3 ri = r[i]; f64_3 vi = v[i]; uint16_t flagsi = flags[i]; f64_3 ai = a[i]; uint32_t deathtime_indexi; MVSKernel::step_forward(ri, vi, flagsi, ai, deathtime_indexi, tbsize, planet_n, h0_log_shared, r_log_shared, m, pl_id, rh, outer_r, dt, mu); r[i] = ri; v[i] = vi; flags[i] = flagsi; a[i] = ai; deathtime_index[i] = deathtime_indexi; } } WHCudaIntegrator::WHCudaIntegrator() { } WHCudaIntegrator::WHCudaIntegrator(HostPlanetPhaseSpace& pl, HostParticlePhaseSpace& pa, const Configuration& config, hipStream_t stream) : base(pl, pa, config) { device_h0_log_0 = Dvf64_3(config.tbsize); device_h0_log_1 = Dvf64_3(config.tbsize); device_particle_a = Dvf64_3(pa.n()); device_planet_rh = Dvf64(pl.n()); hipStreamSynchronize(stream); } Dvf64_3& WHCudaIntegrator::device_h0_log(size_t planet_data_id) { return planet_data_id % 2 ? 
        device_h0_log_1 : device_h0_log_0;
}

void WHCudaIntegrator::recalculate_rh(const HostPlanetPhaseSpace& pl)
{
    base.recalculate_rh(pl);
}

void WHCudaIntegrator::helio_acc_particles(
        const HostPlanetPhaseSpace& pl,
        HostParticlePhaseSpace& pa,
        size_t begin,
        size_t len,
        float64_t time,
        size_t timestep_index,
        bool old)
{
    base.helio_acc_particles(pl, pa, begin, len, time, timestep_index, old);
}

void WHCudaIntegrator::upload_planet_log_cuda(hipStream_t stream, size_t planet_data_id)
{
    memcpy_htd(device_planet_rh, base.planet_rh, stream);
    memcpy_htd(device_h0_log(planet_data_id), base.planet_h0_log.log, stream);
    hipStreamSynchronize(stream);
}

void WHCudaIntegrator::gather_particles(const Vs& indices, size_t begin, size_t length)
{
    base.gather_particles(indices, begin, length);
}

void WHCudaIntegrator::integrate_planets_timeblock(HostPlanetPhaseSpace& pl, size_t nstep, float64_t t, double dt)
{
    base.integrate_planets_timeblock(pl, nstep, t, dt);
}

void WHCudaIntegrator::load_h0(const HostPlanetPhaseSpace& pl)
{
    base.load_h0(pl);
}

void WHCudaIntegrator::integrate_particles_timeblock(const HostPlanetPhaseSpace& pl, HostParticlePhaseSpace& pa, size_t begin, size_t length, float64_t t, double dt)
{
    base.integrate_particles_timeblock(pl, pa, begin, length, t, dt);
}

void WHCudaIntegrator::swap_logs()
{
    base.swap_logs();
}

void WHCudaIntegrator::upload_data_cuda(hipStream_t stream, size_t begin, size_t length)
{
    memcpy_htd(device_particle_a, base.particle_a, stream, begin, begin, length);
    hipStreamSynchronize(stream);
}

void WHCudaIntegrator::integrate_particles_timeblock_cuda(hipStream_t stream, size_t planet_data_id, const DevicePlanetPhaseSpace& pl, DeviceParticlePhaseSpace& pa, double dt)
{
#ifndef CUDA_USE_SHARED_MEM_CACHE
    auto it = thrust::make_zip_iterator(thrust::make_tuple(pa.begin(), device_begin()));
    thrust::for_each(
            thrust::hip::par.on(stream),
            it,
            it + pa.n_alive,
            MVSKernel(
                pl,
                device_h0_log(planet_data_id),
                device_planet_rh,
                base.outer_radius,
                static_cast<uint32_t>(pl.log_len),
                dt
            )
    );
#else
    hipDeviceProp_t prop;
    hipGetDeviceProperties(&prop, 0);

    uint32_t block_size, grid_size, shared_mem = 0;
    if (pa.n_alive < static_cast<uint32_t>(1024 * prop.multiProcessorCount))
    {
        block_size = static_cast<uint32_t>((pa.n_alive / prop.multiProcessorCount + 31) / 32 * 32);
        if (block_size == 0) block_size = 32;
    }
    else
    {
        block_size = 1024;
    }
    grid_size = static_cast<uint32_t>((pa.n_alive + block_size - 1) / block_size);

    hipLaunchKernelGGL(( MVSKernel_), dim3(grid_size), dim3(block_size), shared_mem, stream,
            pa.r.data().get(),
            pa.v.data().get(),
            pa.deathflags.data().get(),
            device_particle_a.data().get(),
            pa.deathtime_index.data().get(),
            static_cast<uint32_t>(pa.n_alive),
            static_cast<uint32_t>(pl.log_len),
            static_cast<uint32_t>(pl.n_alive),
            device_h0_log(planet_data_id).data().get(),
            pl.r_log.data().get(),
            pl.m.data().get(),
            pl.id.data().get(),
            device_planet_rh.data().get(),
            base.outer_radius,
            dt,
            pl.m[0]
    );
#endif
}

}
}
81c136c5c9381cccdd1a52911c52432c0b120edd.cu
#include "types.cuh" #include "wh.cuh" #include "convert.h" #include "util.cuh" namespace sr { namespace wh { using namespace sr::data; const size_t MAXKEP = 5; const float64_t TOLKEP = 1E-14; struct MVSKernel { const float64_t* planet_m; const uint32_t* planet_id; const float64_t mu; const f64_3* planet_h0_log; const f64_3* planet_r_log; const uint32_t planet_n; const uint32_t tbsize; const float64_t dt; const float64_t outer_r; const float64_t* planet_rh; MVSKernel(const DevicePlanetPhaseSpace& planets, const Dvf64_3& h0_log, const Dvf64& _planet_rh, double _outer_r, uint32_t _tbsize, float64_t _dt) : planet_m(planets.m.data().get()), planet_id(planets.id.data().get()), mu(planets.m[0]), planet_h0_log(h0_log.data().get()), planet_r_log(planets.r_log.data().get()), planet_n(static_cast<uint32_t>(planets.n_alive)), tbsize(_tbsize), dt(_dt), outer_r(_outer_r), planet_rh(_planet_rh.data().get()) { } __host__ __device__ static void kepeq(double dM, double ecosEo, double esinEo, double* dE, double* sindE, double* cosdE, uint16_t& flags) { double f, fp, delta; *sindE = sin(*dE); *cosdE = cos(*dE); for (size_t i = 0; i < MAXKEP; i++) { f = *dE - ecosEo * (*sindE) + esinEo * (1. - *cosdE) - dM; fp = 1. - ecosEo * (*cosdE) + esinEo * (*sindE); delta = -f / fp; *dE += delta; *sindE = sin(*dE); *cosdE = cos(*dE); #ifdef CUDA_KEPEQ_CHECK_CONVERGENCE if (fabs(delta) < TOLKEP) { goto done; } #endif } flags = static_cast<uint16_t>((fabs(delta) > TOLKEP) << 2); #ifdef CUDA_KEPEQ_CHECK_CONVERGENCE done: ; #endif } __host__ __device__ static void drift(f64_3& r, f64_3& v, uint16_t& flags, double dt, double mu) { float64_t dist = sqrt(r.lensq()); float64_t vdotr = v.x * r.x + v.y * r.y + v.z * r.z; float64_t energy = v.lensq() * 0.5 - mu / dist; flags = static_cast<uint16_t>((energy >= 0) << 3); float64_t a = -0.5 * mu / energy; float64_t n_ = sqrt(mu / (a * a * a)); float64_t ecosEo = 1.0 - dist / a; float64_t esinEo = vdotr / (n_ * a * a); float64_t e = sqrt(ecosEo * ecosEo + esinEo * esinEo); // subtract off an integer multiple of complete orbits float64_t dM = dt * n_ - M_2PI * (int) (dt * n_ / M_2PI); // remaining time to advance float64_t _dt = dM / n_; // call kepler equation solver with initial guess in dE already float64_t dE = dM - esinEo + esinEo * cos(dM) + ecosEo * sin(dM); float64_t sindE, cosdE; kepeq(dM, ecosEo, esinEo, &dE, &sindE, &cosdE, flags); float64_t fp = 1.0 - ecosEo * cosdE + esinEo * sindE; float64_t f = 1.0 + a * (cosdE - 1.0) / dist; float64_t g = _dt + (sindE - dE) / n_; float64_t fdot = -n_ * sindE * a / (dist * fp); float64_t gdot = 1.0 + (cosdE - 1.0) / fp; f64_3 r0 = r; r = r0 * f + v * g; v = r0 * fdot + v * gdot; } __host__ __device__ static void step_forward( f64_3& r, f64_3& v, uint16_t& flags, f64_3& a, uint32_t& deathtime_index, uint32_t _tbsize, uint32_t planet_n, const f64_3* h0_log, const f64_3* r_log, const float64_t* m, const uint32_t* pl_id, const float64_t* rh, double outer_radius, float64_t dt, float64_t mu) { deathtime_index = 0; for (uint32_t step = 0; step < static_cast<uint32_t>(_tbsize); step++) { if (flags == 0) { // kick // if step = 0, the acceleration is preloaded - this comes from v = v + a * (dt / 2); drift(r, v, flags, dt, mu); a = h0_log[step]; // planet 0 is not counted for (uint32_t i = 1; i < static_cast<uint32_t>(planet_n); i++) { f64_3 dr = r - r_log[step * (planet_n - 1) + i - 1]; float64_t rad = dr.lensq(); if (rad < rh[i] * rh[i] && flags == 0) { flags = static_cast<uint16_t>((pl_id[i] << 8) | 0x0001); } float64_t inv3 = 1. 
/ (rad * sqrt(rad)); float64_t fac = m[i] * inv3; a -= dr * fac; } float64_t rad = r.lensq(); if (rad < rh[0] * rh[0]) { flags = 0x0001; } if (rad > outer_radius * outer_radius) { flags = 0x0002; } v = v + a * (dt / 2); deathtime_index = step + 1; } } } // manual diagram [5] template<typename Tuple> __host__ __device__ void operator()(Tuple args) const { uint32_t _tbsize = this->tbsize; const f64_3* h0_log = this->planet_h0_log; const f64_3* r_log = this->planet_r_log; const float64_t* m = this->planet_m; const uint32_t* ids = this->planet_id; const float64_t* rh = this->planet_rh; f64_3 r = thrust::get<0>(thrust::get<0>(args)); f64_3 v = thrust::get<1>(thrust::get<0>(args)); uint16_t flags = thrust::get<2>(thrust::get<0>(args)); uint32_t deathtime_index = 0; f64_3 a = thrust::get<1>(args); step_forward(r, v, flags, a, deathtime_index, _tbsize, planet_n, h0_log, r_log, m, ids, rh, this->outer_r, this->dt, this->mu); thrust::get<0>(thrust::get<0>(args)) = r; thrust::get<1>(thrust::get<0>(args)) = v; thrust::get<2>(thrust::get<0>(args)) = flags; thrust::get<3>(thrust::get<0>(args)) = deathtime_index; thrust::get<1>(args) = a; } }; __global__ void MVSKernel_( f64_3* r, f64_3* v, uint16_t* flags, f64_3* a, uint32_t* deathtime_index, uint32_t n, uint32_t tbsize, uint32_t planet_n, const f64_3* h0_log, const f64_3* r_log, const float64_t* m, const uint32_t* pl_id, const float64_t* rh, double outer_r, float64_t dt, float64_t mu) { // max timeblock size: 384 __shared__ f64_3 h0_log_shared[384]; __shared__ f64_3 r_log_shared[1536]; for (int i = threadIdx.x; i < tbsize; i += blockDim.x) { h0_log_shared[i] = h0_log[i]; } for (int i = threadIdx.x; i < tbsize * (planet_n - 1); i += blockDim.x) { r_log_shared[i] = r_log[i]; } __syncthreads(); for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { f64_3 ri = r[i]; f64_3 vi = v[i]; uint16_t flagsi = flags[i]; f64_3 ai = a[i]; uint32_t deathtime_indexi; MVSKernel::step_forward(ri, vi, flagsi, ai, deathtime_indexi, tbsize, planet_n, h0_log_shared, r_log_shared, m, pl_id, rh, outer_r, dt, mu); r[i] = ri; v[i] = vi; flags[i] = flagsi; a[i] = ai; deathtime_index[i] = deathtime_indexi; } } WHCudaIntegrator::WHCudaIntegrator() { } WHCudaIntegrator::WHCudaIntegrator(HostPlanetPhaseSpace& pl, HostParticlePhaseSpace& pa, const Configuration& config, cudaStream_t stream) : base(pl, pa, config) { device_h0_log_0 = Dvf64_3(config.tbsize); device_h0_log_1 = Dvf64_3(config.tbsize); device_particle_a = Dvf64_3(pa.n()); device_planet_rh = Dvf64(pl.n()); cudaStreamSynchronize(stream); } Dvf64_3& WHCudaIntegrator::device_h0_log(size_t planet_data_id) { return planet_data_id % 2 ? 
        device_h0_log_1 : device_h0_log_0;
}

void WHCudaIntegrator::recalculate_rh(const HostPlanetPhaseSpace& pl)
{
    base.recalculate_rh(pl);
}

void WHCudaIntegrator::helio_acc_particles(
        const HostPlanetPhaseSpace& pl,
        HostParticlePhaseSpace& pa,
        size_t begin,
        size_t len,
        float64_t time,
        size_t timestep_index,
        bool old)
{
    base.helio_acc_particles(pl, pa, begin, len, time, timestep_index, old);
}

void WHCudaIntegrator::upload_planet_log_cuda(cudaStream_t stream, size_t planet_data_id)
{
    memcpy_htd(device_planet_rh, base.planet_rh, stream);
    memcpy_htd(device_h0_log(planet_data_id), base.planet_h0_log.log, stream);
    cudaStreamSynchronize(stream);
}

void WHCudaIntegrator::gather_particles(const Vs& indices, size_t begin, size_t length)
{
    base.gather_particles(indices, begin, length);
}

void WHCudaIntegrator::integrate_planets_timeblock(HostPlanetPhaseSpace& pl, size_t nstep, float64_t t, double dt)
{
    base.integrate_planets_timeblock(pl, nstep, t, dt);
}

void WHCudaIntegrator::load_h0(const HostPlanetPhaseSpace& pl)
{
    base.load_h0(pl);
}

void WHCudaIntegrator::integrate_particles_timeblock(const HostPlanetPhaseSpace& pl, HostParticlePhaseSpace& pa, size_t begin, size_t length, float64_t t, double dt)
{
    base.integrate_particles_timeblock(pl, pa, begin, length, t, dt);
}

void WHCudaIntegrator::swap_logs()
{
    base.swap_logs();
}

void WHCudaIntegrator::upload_data_cuda(cudaStream_t stream, size_t begin, size_t length)
{
    memcpy_htd(device_particle_a, base.particle_a, stream, begin, begin, length);
    cudaStreamSynchronize(stream);
}

void WHCudaIntegrator::integrate_particles_timeblock_cuda(cudaStream_t stream, size_t planet_data_id, const DevicePlanetPhaseSpace& pl, DeviceParticlePhaseSpace& pa, double dt)
{
#ifndef CUDA_USE_SHARED_MEM_CACHE
    auto it = thrust::make_zip_iterator(thrust::make_tuple(pa.begin(), device_begin()));
    thrust::for_each(
            thrust::cuda::par.on(stream),
            it,
            it + pa.n_alive,
            MVSKernel(
                pl,
                device_h0_log(planet_data_id),
                device_planet_rh,
                base.outer_radius,
                static_cast<uint32_t>(pl.log_len),
                dt
            )
    );
#else
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);

    uint32_t block_size, grid_size, shared_mem = 0;
    if (pa.n_alive < static_cast<uint32_t>(1024 * prop.multiProcessorCount))
    {
        block_size = static_cast<uint32_t>((pa.n_alive / prop.multiProcessorCount + 31) / 32 * 32);
        if (block_size == 0) block_size = 32;
    }
    else
    {
        block_size = 1024;
    }
    grid_size = static_cast<uint32_t>((pa.n_alive + block_size - 1) / block_size);

    MVSKernel_<<<grid_size, block_size, shared_mem, stream>>>(
            pa.r.data().get(),
            pa.v.data().get(),
            pa.deathflags.data().get(),
            device_particle_a.data().get(),
            pa.deathtime_index.data().get(),
            static_cast<uint32_t>(pa.n_alive),
            static_cast<uint32_t>(pl.log_len),
            static_cast<uint32_t>(pl.n_alive),
            device_h0_log(planet_data_id).data().get(),
            pl.r_log.data().get(),
            pl.m.data().get(),
            pl.id.data().get(),
            device_planet_rh.data().get(),
            base.outer_radius,
            dt,
            pl.m[0]
    );
#endif
}

}
}
e11f1cca230849b189bff951946157295552025c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <quda_internal.h> #include <quda_matrix.h> #include <tune_quda.h> #include <gauge_field_order.h> #include <launch_kernel.cuh> #include <cub_helper.cuh> #include <fstream> namespace quda { using namespace gauge; bool forceMonitor() { static bool init = false; static bool monitor = false; if (!init) { char *path = getenv("QUDA_RESOURCE_PATH"); char *enable_force_monitor = getenv("QUDA_ENABLE_FORCE_MONITOR"); if (path && enable_force_monitor && strcmp(enable_force_monitor, "1") == 0) monitor = true; init = true; } return monitor; } static std::stringstream force_stream; static long long force_count = 0; static long long force_flush = 1000; // how many force samples we accumulate before flushing void flushForceMonitor() { if (!forceMonitor() || comm_rank() != 0) return; static std::string path = std::string(getenv("QUDA_RESOURCE_PATH")); static char *profile_fname = getenv("QUDA_PROFILE_OUTPUT_BASE"); std::ofstream force_file; static long long count = 0; if (count == 0) { path += (profile_fname ? std::string("/") + profile_fname + "_force.tsv" : std::string("/force.tsv")); force_file.open(path.c_str()); force_file << "Force\tL1\tL2\tdt" << std::endl; } else { force_file.open(path.c_str(), std::ios_base::app); } if (getVerbosity() >= QUDA_VERBOSE) printfQuda("Flushing force monitor data to %s\n", path.c_str()); force_file << force_stream.str(); force_file.flush(); force_file.close(); // empty the stream buffer force_stream.clear(); force_stream.str(std::string()); count++; } void forceRecord(double2 &force, double dt, const char *fname) { qudaDeviceSynchronize(); comm_allreduce_max_array((double*)&force, 2); if (comm_rank()==0) { force_stream << fname << "\t" << std::setprecision(5) << force.x << "\t" << std::setprecision(5) << force.y << "\t" << std::setprecision(5) << dt << std::endl; if (++force_count % force_flush == 0) flushForceMonitor(); } } #ifdef GPU_GAUGE_TOOLS template <typename Mom> struct MomActionArg : public ReduceArg<double> { int threads; // number of active threads required Mom mom; int X[4]; // grid dimensions MomActionArg(const Mom &mom, const GaugeField &meta) : ReduceArg<double>(), mom(mom) { threads = meta.VolumeCB(); for(int dir=0; dir<4; ++dir) X[dir] = meta.X()[dir]; } }; template<int blockSize, typename Float, typename Mom> __global__ void computeMomAction(MomActionArg<Mom> arg){ int x = threadIdx.x + blockIdx.x*blockDim.x; int parity = threadIdx.y; double action = 0.0; while (x < arg.threads) { // loop over direction for (int mu=0; mu<4; mu++) { Float v[10]; arg.mom.load(v, x, mu, parity); double local_sum = 0.0; for (int j=0; j<6; j++) local_sum += v[j]*v[j]; for (int j=6; j<9; j++) local_sum += 0.5*v[j]*v[j]; local_sum -= 4.0; action += local_sum; } x += blockDim.x*gridDim.x; } // perform final inter-block reduction and write out result reduce2d<blockSize,2>(arg, action); } template<typename Float, typename Mom> class MomAction : TunableLocalParity { MomActionArg<Mom> &arg; const GaugeField &meta; private: bool tuneGridDim() const { return true; } public: MomAction(MomActionArg<Mom> &arg, const GaugeField &meta) : arg(arg), meta(meta) {} virtual ~MomAction () { } void apply(const hipStream_t &stream){ if (meta.Location() == QUDA_CUDA_FIELD_LOCATION){ arg.result_h[0] = 0.0; TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); LAUNCH_KERNEL_LOCAL_PARITY(computeMomAction, tp, stream, arg, Float, Mom); } else { errorQuda("CPU not supported yet\n"); } } TuneKey 
tuneKey() const { std::stringstream aux; aux << "threads=" << arg.threads << ",prec=" << sizeof(Float); return TuneKey(meta.VolString(), typeid(*this).name(), aux.str().c_str()); } long long flops() const { return 4*2*arg.threads*23; } long long bytes() const { return 4*2*arg.threads*arg.mom.Bytes(); } }; template<typename Float, typename Mom> void momAction(const Mom mom, const GaugeField& meta, double &action) { MomActionArg<Mom> arg(mom, meta); MomAction<Float,Mom> momAction(arg, meta); momAction.apply(0); qudaDeviceSynchronize(); comm_allreduce((double*)arg.result_h); action = arg.result_h[0]; } template<typename Float> double momAction(const GaugeField& mom) { double action = 0.0; if (mom.Order() == QUDA_FLOAT2_GAUGE_ORDER) { if (mom.Reconstruct() == QUDA_RECONSTRUCT_10) { momAction<Float>(FloatNOrder<Float,10,2,10>(mom), mom, action); } else { errorQuda("Reconstruction type %d not supported", mom.Reconstruct()); } } else { errorQuda("Gauge Field order %d not supported", mom.Order()); } return action; } #endif double computeMomAction(const GaugeField& mom) { double action = 0.0; #ifdef GPU_GAUGE_TOOLS if (mom.Precision() == QUDA_DOUBLE_PRECISION) { action = momAction<double>(mom); } else if(mom.Precision() == QUDA_SINGLE_PRECISION) { action = momAction<float>(mom); } else { errorQuda("Precision %d not supported", mom.Precision()); } #else errorQuda("%s not build", __func__); #endif return action; } #ifdef GPU_GAUGE_TOOLS template<typename Float, QudaReconstructType reconstruct_> struct UpdateMomArg : public ReduceArg<double2> { int threads; static constexpr int force_recon = (reconstruct_ == QUDA_RECONSTRUCT_10 ? 11 : 18); FloatNOrder<Float,18,2,11> mom; FloatNOrder<Float,18,2,force_recon> force; Float coeff; int X[4]; // grid dimensions on mom int E[4]; // grid dimensions on force (possibly extended) int border[4]; // UpdateMomArg(GaugeField &mom, const Float &coeff, GaugeField &force) : threads(mom.VolumeCB()), mom(mom), coeff(coeff), force(force) { for (int dir=0; dir<4; ++dir) { X[dir] = mom.X()[dir]; E[dir] = force.X()[dir]; border[dir] = force.R()[dir]; } } }; /** @brief Functor for finding the maximum over a double2 field. Each lane of the double2 is evaluated separately. This functor is passed to the reduce helper. */ struct max_reducer2 { __device__ __host__ inline double2 operator()(const double2 &a, const double2 &b) { return make_double2(a.x > b.x ? a.x : b.x, a.y > b.y ? 
a.y : b.y); } }; template <int blockSize, typename Float, typename Arg> __global__ void UpdateMomKernel(Arg arg) { int x_cb = blockIdx.x*blockDim.x + threadIdx.x; int parity = threadIdx.y; double2 norm2 = make_double2(0.0,0.0); max_reducer2 r; while (x_cb<arg.threads) { int x[4]; getCoords(x, x_cb, arg.X, parity); for (int d=0; d<4; d++) x[d] += arg.border[d]; int e_cb = linkIndex(x,arg.E); #pragma unroll for (int d=0; d<4; d++) { Matrix<complex<Float>,3> m = arg.mom(d, x_cb, parity); Matrix<complex<Float>,3> f = arg.force(d, e_cb, parity); // project to traceless anti-hermitian prior to taking norm makeAntiHerm(f); // compute force norms norm2 = r(make_double2(f.L1(), f.L2()), norm2); m = m + arg.coeff * f; // strictly speaking this shouldn't be needed since the // momentum should already be traceless anti-hermitian but at // present the unit test will fail without this makeAntiHerm(m); arg.mom(d, x_cb, parity) = m; } x_cb += gridDim.x*blockDim.x; } // perform final inter-block reduction and write out result reduce2d<blockSize,2,double2,false,max_reducer2>(arg, norm2, 0); } // UpdateMom template<typename Float, typename Arg> class UpdateMom : TunableLocalParity { Arg &arg; const GaugeField &meta; private: bool tuneGridDim() const { return true; } public: UpdateMom(Arg &arg, const GaugeField &meta) : arg(arg), meta(meta) {} virtual ~UpdateMom () { } void apply(const hipStream_t &stream){ if (meta.Location() == QUDA_CUDA_FIELD_LOCATION) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); LAUNCH_KERNEL_LOCAL_PARITY(UpdateMomKernel, tp, stream, arg, Float); } else { errorQuda("CPU not supported yet\n"); } } TuneKey tuneKey() const { std::stringstream aux; aux << "threads=" << arg.threads << ",prec=" << sizeof(Float); return TuneKey(meta.VolString(), typeid(*this).name(), aux.str().c_str()); } void preTune() { arg.mom.save();} void postTune() { arg.mom.load();} long long flops() const { return 4*2*arg.threads*(36+42); } long long bytes() const { return 4*2*arg.threads*(2*arg.mom.Bytes()+arg.force.Bytes()); } }; template<typename Float, QudaReconstructType reconstruct> void updateMomentum(GaugeField &mom, Float coeff, GaugeField &force, const char *fname) { UpdateMomArg<Float,reconstruct> arg(mom, coeff, force); UpdateMom<Float,decltype(arg)> update(arg, force); update.apply(0); if (forceMonitor()) forceRecord(*((double2*)arg.result_h), arg.coeff, fname); } template <typename Float> void updateMomentum(GaugeField &mom, double coeff, GaugeField &force, const char *fname) { if (mom.Reconstruct() != QUDA_RECONSTRUCT_10) errorQuda("Momentum field with reconstruct %d not supported", mom.Reconstruct()); if (force.Order() != QUDA_FLOAT2_GAUGE_ORDER) errorQuda("Force field with order %d not supported", force.Order()); if (force.Reconstruct() == QUDA_RECONSTRUCT_10) { updateMomentum<Float,QUDA_RECONSTRUCT_10>(mom, coeff, force, fname); } else if (force.Reconstruct() == QUDA_RECONSTRUCT_NO) { updateMomentum<Float,QUDA_RECONSTRUCT_NO>(mom, coeff, force, fname); } else { errorQuda("Unsupported force reconstruction: %d", force.Reconstruct()); } } #endif // GPU_GAUGE_TOOLS void updateMomentum(GaugeField &mom, double coeff, GaugeField &force, const char *fname) { #ifdef GPU_GAUGE_TOOLS if(mom.Order() != QUDA_FLOAT2_GAUGE_ORDER) errorQuda("Unsupported output ordering: %d\n", mom.Order()); if (mom.Precision() != force.Precision()) errorQuda("Mixed precision not supported: %d %d\n", mom.Precision(), force.Precision()); if (mom.Precision() == QUDA_DOUBLE_PRECISION) { updateMomentum<double>(mom, 
coeff, force, fname); } else if (mom.Precision() == QUDA_SINGLE_PRECISION) { updateMomentum<float>(mom, coeff, force, fname); } else { errorQuda("Unsupported precision: %d", mom.Precision()); } checkCudaError(); #else errorQuda("%s not built", __func__); #endif // GPU_GAUGE_TOOLS return; } #ifdef GPU_GAUGE_TOOLS template<typename Float, typename Force, typename Gauge> struct ApplyUArg { int threads; Force force; Gauge U; int X[4]; // grid dimensions ApplyUArg(Force &force, Gauge &U, GaugeField &meta) : threads(meta.VolumeCB()), force(force), U(U) { for (int dir=0; dir<4; ++dir) X[dir] = meta.X()[dir]; } }; template<typename Float, typename Force, typename Gauge> __global__ void ApplyUKernel(ApplyUArg<Float,Force,Gauge> arg) { int x = blockIdx.x*blockDim.x + threadIdx.x; int parity = threadIdx.y; Matrix<complex<Float>,3> f, u; while (x<arg.threads) { for (int d=0; d<4; d++) { arg.force.load(reinterpret_cast<Float*>(f.data), x, d, parity); arg.U.load(reinterpret_cast<Float*>(u.data), x, d, parity); f = u * f; arg.force.save(reinterpret_cast<Float*>(f.data), x, d, parity); } x += gridDim.x*blockDim.x; } return; } // ApplyU template<typename Float, typename Force, typename Gauge> class ApplyU : TunableLocalParity { ApplyUArg<Float, Force, Gauge> &arg; const GaugeField &meta; private: unsigned int minThreads() const { return arg.threads; } public: ApplyU(ApplyUArg<Float,Force,Gauge> &arg, const GaugeField &meta) : arg(arg), meta(meta) {} virtual ~ApplyU () { } void apply(const hipStream_t &stream){ if(meta.Location() == QUDA_CUDA_FIELD_LOCATION){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); hipLaunchKernelGGL(( ApplyUKernel<Float,Force,Gauge>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); } else { errorQuda("CPU not supported yet\n"); } } TuneKey tuneKey() const { std::stringstream aux; aux << "threads=" << arg.threads << ",prec=" << sizeof(Float); return TuneKey(meta.VolString(), typeid(*this).name(), aux.str().c_str()); } void preTune() { arg.force.save();} void postTune() { arg.force.load();} long long flops() const { return 4*2*arg.threads*198; } long long bytes() const { return 4*2*arg.threads*(2*arg.force.Bytes()+arg.U.Bytes()); } }; template<typename Float, typename Force, typename Gauge> void applyU(Force force, Gauge U, GaugeField &meta) { ApplyUArg<Float,Force,Gauge> arg(force, U, meta); ApplyU<Float,Force,Gauge> applyU(arg, meta); applyU.apply(0); qudaDeviceSynchronize(); } template <typename Float> void applyU(GaugeField &force, GaugeField &U) { if (force.Reconstruct() != QUDA_RECONSTRUCT_NO) errorQuda("Force field with reconstruct %d not supported", force.Reconstruct()); if (U.Reconstruct() == QUDA_RECONSTRUCT_NO) { applyU<Float>(FloatNOrder<Float, 18, 2, 18>(force), FloatNOrder<Float, 18, 2, 18>(U), force); } else if (U.Reconstruct() == QUDA_RECONSTRUCT_NO) { applyU<Float>(FloatNOrder<Float, 18, 2, 18>(force), FloatNOrder<Float, 18, 2, 12>(U), force); } else { errorQuda("Unsupported gauge reconstruction: %d", U.Reconstruct()); } } #endif // GPU_GAUGE_TOOLS void applyU(GaugeField &force, GaugeField &U) { #ifdef GPU_GAUGE_TOOLS if(force.Order() != QUDA_FLOAT2_GAUGE_ORDER) errorQuda("Unsupported output ordering: %d\n", force.Order()); if (force.Precision() != U.Precision()) errorQuda("Mixed precision not supported: %d %d\n", force.Precision(), U.Precision()); if (force.Precision() == QUDA_DOUBLE_PRECISION) { applyU<double>(force, U); } else { errorQuda("Unsupported precision: %d", force.Precision()); } checkCudaError(); #else errorQuda("%s not built", 
              __func__);
#endif // GPU_GAUGE_TOOLS
    return;
  }

} // namespace quda
e11f1cca230849b189bff951946157295552025c.cu
#include <quda_internal.h> #include <quda_matrix.h> #include <tune_quda.h> #include <gauge_field_order.h> #include <launch_kernel.cuh> #include <cub_helper.cuh> #include <fstream> namespace quda { using namespace gauge; bool forceMonitor() { static bool init = false; static bool monitor = false; if (!init) { char *path = getenv("QUDA_RESOURCE_PATH"); char *enable_force_monitor = getenv("QUDA_ENABLE_FORCE_MONITOR"); if (path && enable_force_monitor && strcmp(enable_force_monitor, "1") == 0) monitor = true; init = true; } return monitor; } static std::stringstream force_stream; static long long force_count = 0; static long long force_flush = 1000; // how many force samples we accumulate before flushing void flushForceMonitor() { if (!forceMonitor() || comm_rank() != 0) return; static std::string path = std::string(getenv("QUDA_RESOURCE_PATH")); static char *profile_fname = getenv("QUDA_PROFILE_OUTPUT_BASE"); std::ofstream force_file; static long long count = 0; if (count == 0) { path += (profile_fname ? std::string("/") + profile_fname + "_force.tsv" : std::string("/force.tsv")); force_file.open(path.c_str()); force_file << "Force\tL1\tL2\tdt" << std::endl; } else { force_file.open(path.c_str(), std::ios_base::app); } if (getVerbosity() >= QUDA_VERBOSE) printfQuda("Flushing force monitor data to %s\n", path.c_str()); force_file << force_stream.str(); force_file.flush(); force_file.close(); // empty the stream buffer force_stream.clear(); force_stream.str(std::string()); count++; } void forceRecord(double2 &force, double dt, const char *fname) { qudaDeviceSynchronize(); comm_allreduce_max_array((double*)&force, 2); if (comm_rank()==0) { force_stream << fname << "\t" << std::setprecision(5) << force.x << "\t" << std::setprecision(5) << force.y << "\t" << std::setprecision(5) << dt << std::endl; if (++force_count % force_flush == 0) flushForceMonitor(); } } #ifdef GPU_GAUGE_TOOLS template <typename Mom> struct MomActionArg : public ReduceArg<double> { int threads; // number of active threads required Mom mom; int X[4]; // grid dimensions MomActionArg(const Mom &mom, const GaugeField &meta) : ReduceArg<double>(), mom(mom) { threads = meta.VolumeCB(); for(int dir=0; dir<4; ++dir) X[dir] = meta.X()[dir]; } }; template<int blockSize, typename Float, typename Mom> __global__ void computeMomAction(MomActionArg<Mom> arg){ int x = threadIdx.x + blockIdx.x*blockDim.x; int parity = threadIdx.y; double action = 0.0; while (x < arg.threads) { // loop over direction for (int mu=0; mu<4; mu++) { Float v[10]; arg.mom.load(v, x, mu, parity); double local_sum = 0.0; for (int j=0; j<6; j++) local_sum += v[j]*v[j]; for (int j=6; j<9; j++) local_sum += 0.5*v[j]*v[j]; local_sum -= 4.0; action += local_sum; } x += blockDim.x*gridDim.x; } // perform final inter-block reduction and write out result reduce2d<blockSize,2>(arg, action); } template<typename Float, typename Mom> class MomAction : TunableLocalParity { MomActionArg<Mom> &arg; const GaugeField &meta; private: bool tuneGridDim() const { return true; } public: MomAction(MomActionArg<Mom> &arg, const GaugeField &meta) : arg(arg), meta(meta) {} virtual ~MomAction () { } void apply(const cudaStream_t &stream){ if (meta.Location() == QUDA_CUDA_FIELD_LOCATION){ arg.result_h[0] = 0.0; TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); LAUNCH_KERNEL_LOCAL_PARITY(computeMomAction, tp, stream, arg, Float, Mom); } else { errorQuda("CPU not supported yet\n"); } } TuneKey tuneKey() const { std::stringstream aux; aux << "threads=" << arg.threads << ",prec=" << 
sizeof(Float); return TuneKey(meta.VolString(), typeid(*this).name(), aux.str().c_str()); } long long flops() const { return 4*2*arg.threads*23; } long long bytes() const { return 4*2*arg.threads*arg.mom.Bytes(); } }; template<typename Float, typename Mom> void momAction(const Mom mom, const GaugeField& meta, double &action) { MomActionArg<Mom> arg(mom, meta); MomAction<Float,Mom> momAction(arg, meta); momAction.apply(0); qudaDeviceSynchronize(); comm_allreduce((double*)arg.result_h); action = arg.result_h[0]; } template<typename Float> double momAction(const GaugeField& mom) { double action = 0.0; if (mom.Order() == QUDA_FLOAT2_GAUGE_ORDER) { if (mom.Reconstruct() == QUDA_RECONSTRUCT_10) { momAction<Float>(FloatNOrder<Float,10,2,10>(mom), mom, action); } else { errorQuda("Reconstruction type %d not supported", mom.Reconstruct()); } } else { errorQuda("Gauge Field order %d not supported", mom.Order()); } return action; } #endif double computeMomAction(const GaugeField& mom) { double action = 0.0; #ifdef GPU_GAUGE_TOOLS if (mom.Precision() == QUDA_DOUBLE_PRECISION) { action = momAction<double>(mom); } else if(mom.Precision() == QUDA_SINGLE_PRECISION) { action = momAction<float>(mom); } else { errorQuda("Precision %d not supported", mom.Precision()); } #else errorQuda("%s not build", __func__); #endif return action; } #ifdef GPU_GAUGE_TOOLS template<typename Float, QudaReconstructType reconstruct_> struct UpdateMomArg : public ReduceArg<double2> { int threads; static constexpr int force_recon = (reconstruct_ == QUDA_RECONSTRUCT_10 ? 11 : 18); FloatNOrder<Float,18,2,11> mom; FloatNOrder<Float,18,2,force_recon> force; Float coeff; int X[4]; // grid dimensions on mom int E[4]; // grid dimensions on force (possibly extended) int border[4]; // UpdateMomArg(GaugeField &mom, const Float &coeff, GaugeField &force) : threads(mom.VolumeCB()), mom(mom), coeff(coeff), force(force) { for (int dir=0; dir<4; ++dir) { X[dir] = mom.X()[dir]; E[dir] = force.X()[dir]; border[dir] = force.R()[dir]; } } }; /** @brief Functor for finding the maximum over a double2 field. Each lane of the double2 is evaluated separately. This functor is passed to the reduce helper. */ struct max_reducer2 { __device__ __host__ inline double2 operator()(const double2 &a, const double2 &b) { return make_double2(a.x > b.x ? a.x : b.x, a.y > b.y ? 
a.y : b.y); } }; template <int blockSize, typename Float, typename Arg> __global__ void UpdateMomKernel(Arg arg) { int x_cb = blockIdx.x*blockDim.x + threadIdx.x; int parity = threadIdx.y; double2 norm2 = make_double2(0.0,0.0); max_reducer2 r; while (x_cb<arg.threads) { int x[4]; getCoords(x, x_cb, arg.X, parity); for (int d=0; d<4; d++) x[d] += arg.border[d]; int e_cb = linkIndex(x,arg.E); #pragma unroll for (int d=0; d<4; d++) { Matrix<complex<Float>,3> m = arg.mom(d, x_cb, parity); Matrix<complex<Float>,3> f = arg.force(d, e_cb, parity); // project to traceless anti-hermitian prior to taking norm makeAntiHerm(f); // compute force norms norm2 = r(make_double2(f.L1(), f.L2()), norm2); m = m + arg.coeff * f; // strictly speaking this shouldn't be needed since the // momentum should already be traceless anti-hermitian but at // present the unit test will fail without this makeAntiHerm(m); arg.mom(d, x_cb, parity) = m; } x_cb += gridDim.x*blockDim.x; } // perform final inter-block reduction and write out result reduce2d<blockSize,2,double2,false,max_reducer2>(arg, norm2, 0); } // UpdateMom template<typename Float, typename Arg> class UpdateMom : TunableLocalParity { Arg &arg; const GaugeField &meta; private: bool tuneGridDim() const { return true; } public: UpdateMom(Arg &arg, const GaugeField &meta) : arg(arg), meta(meta) {} virtual ~UpdateMom () { } void apply(const cudaStream_t &stream){ if (meta.Location() == QUDA_CUDA_FIELD_LOCATION) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); LAUNCH_KERNEL_LOCAL_PARITY(UpdateMomKernel, tp, stream, arg, Float); } else { errorQuda("CPU not supported yet\n"); } } TuneKey tuneKey() const { std::stringstream aux; aux << "threads=" << arg.threads << ",prec=" << sizeof(Float); return TuneKey(meta.VolString(), typeid(*this).name(), aux.str().c_str()); } void preTune() { arg.mom.save();} void postTune() { arg.mom.load();} long long flops() const { return 4*2*arg.threads*(36+42); } long long bytes() const { return 4*2*arg.threads*(2*arg.mom.Bytes()+arg.force.Bytes()); } }; template<typename Float, QudaReconstructType reconstruct> void updateMomentum(GaugeField &mom, Float coeff, GaugeField &force, const char *fname) { UpdateMomArg<Float,reconstruct> arg(mom, coeff, force); UpdateMom<Float,decltype(arg)> update(arg, force); update.apply(0); if (forceMonitor()) forceRecord(*((double2*)arg.result_h), arg.coeff, fname); } template <typename Float> void updateMomentum(GaugeField &mom, double coeff, GaugeField &force, const char *fname) { if (mom.Reconstruct() != QUDA_RECONSTRUCT_10) errorQuda("Momentum field with reconstruct %d not supported", mom.Reconstruct()); if (force.Order() != QUDA_FLOAT2_GAUGE_ORDER) errorQuda("Force field with order %d not supported", force.Order()); if (force.Reconstruct() == QUDA_RECONSTRUCT_10) { updateMomentum<Float,QUDA_RECONSTRUCT_10>(mom, coeff, force, fname); } else if (force.Reconstruct() == QUDA_RECONSTRUCT_NO) { updateMomentum<Float,QUDA_RECONSTRUCT_NO>(mom, coeff, force, fname); } else { errorQuda("Unsupported force reconstruction: %d", force.Reconstruct()); } } #endif // GPU_GAUGE_TOOLS void updateMomentum(GaugeField &mom, double coeff, GaugeField &force, const char *fname) { #ifdef GPU_GAUGE_TOOLS if(mom.Order() != QUDA_FLOAT2_GAUGE_ORDER) errorQuda("Unsupported output ordering: %d\n", mom.Order()); if (mom.Precision() != force.Precision()) errorQuda("Mixed precision not supported: %d %d\n", mom.Precision(), force.Precision()); if (mom.Precision() == QUDA_DOUBLE_PRECISION) { updateMomentum<double>(mom, 
coeff, force, fname); } else if (mom.Precision() == QUDA_SINGLE_PRECISION) { updateMomentum<float>(mom, coeff, force, fname); } else { errorQuda("Unsupported precision: %d", mom.Precision()); } checkCudaError(); #else errorQuda("%s not built", __func__); #endif // GPU_GAUGE_TOOLS return; } #ifdef GPU_GAUGE_TOOLS template<typename Float, typename Force, typename Gauge> struct ApplyUArg { int threads; Force force; Gauge U; int X[4]; // grid dimensions ApplyUArg(Force &force, Gauge &U, GaugeField &meta) : threads(meta.VolumeCB()), force(force), U(U) { for (int dir=0; dir<4; ++dir) X[dir] = meta.X()[dir]; } }; template<typename Float, typename Force, typename Gauge> __global__ void ApplyUKernel(ApplyUArg<Float,Force,Gauge> arg) { int x = blockIdx.x*blockDim.x + threadIdx.x; int parity = threadIdx.y; Matrix<complex<Float>,3> f, u; while (x<arg.threads) { for (int d=0; d<4; d++) { arg.force.load(reinterpret_cast<Float*>(f.data), x, d, parity); arg.U.load(reinterpret_cast<Float*>(u.data), x, d, parity); f = u * f; arg.force.save(reinterpret_cast<Float*>(f.data), x, d, parity); } x += gridDim.x*blockDim.x; } return; } // ApplyU template<typename Float, typename Force, typename Gauge> class ApplyU : TunableLocalParity { ApplyUArg<Float, Force, Gauge> &arg; const GaugeField &meta; private: unsigned int minThreads() const { return arg.threads; } public: ApplyU(ApplyUArg<Float,Force,Gauge> &arg, const GaugeField &meta) : arg(arg), meta(meta) {} virtual ~ApplyU () { } void apply(const cudaStream_t &stream){ if(meta.Location() == QUDA_CUDA_FIELD_LOCATION){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); ApplyUKernel<Float,Force,Gauge><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); } else { errorQuda("CPU not supported yet\n"); } } TuneKey tuneKey() const { std::stringstream aux; aux << "threads=" << arg.threads << ",prec=" << sizeof(Float); return TuneKey(meta.VolString(), typeid(*this).name(), aux.str().c_str()); } void preTune() { arg.force.save();} void postTune() { arg.force.load();} long long flops() const { return 4*2*arg.threads*198; } long long bytes() const { return 4*2*arg.threads*(2*arg.force.Bytes()+arg.U.Bytes()); } }; template<typename Float, typename Force, typename Gauge> void applyU(Force force, Gauge U, GaugeField &meta) { ApplyUArg<Float,Force,Gauge> arg(force, U, meta); ApplyU<Float,Force,Gauge> applyU(arg, meta); applyU.apply(0); qudaDeviceSynchronize(); } template <typename Float> void applyU(GaugeField &force, GaugeField &U) { if (force.Reconstruct() != QUDA_RECONSTRUCT_NO) errorQuda("Force field with reconstruct %d not supported", force.Reconstruct()); if (U.Reconstruct() == QUDA_RECONSTRUCT_NO) { applyU<Float>(FloatNOrder<Float, 18, 2, 18>(force), FloatNOrder<Float, 18, 2, 18>(U), force); } else if (U.Reconstruct() == QUDA_RECONSTRUCT_NO) { applyU<Float>(FloatNOrder<Float, 18, 2, 18>(force), FloatNOrder<Float, 18, 2, 12>(U), force); } else { errorQuda("Unsupported gauge reconstruction: %d", U.Reconstruct()); } } #endif // GPU_GAUGE_TOOLS void applyU(GaugeField &force, GaugeField &U) { #ifdef GPU_GAUGE_TOOLS if(force.Order() != QUDA_FLOAT2_GAUGE_ORDER) errorQuda("Unsupported output ordering: %d\n", force.Order()); if (force.Precision() != U.Precision()) errorQuda("Mixed precision not supported: %d %d\n", force.Precision(), U.Precision()); if (force.Precision() == QUDA_DOUBLE_PRECISION) { applyU<double>(force, U); } else { errorQuda("Unsupported precision: %d", force.Precision()); } checkCudaError(); #else errorQuda("%s not built", __func__); #endif // 
GPU_GAUGE_TOOLS
    return;
  }

} // namespace quda
8c8dc39be7c22bf665bee9777ae63986abaadfb9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* PVM_multi.cu Tim Behrens, Saad Jbabdi, Stam Sotiropoulos, Moises Hernandez - FMRIB Image Analysis Group Copyright (C) 2005 University of Oxford */ /* Part of FSL - FMRIB's Software Library http://www.fmrib.ox.ac.uk/fsl [email protected] Developed at FMRIB (Oxford Centre for Functional Magnetic Resonance Imaging of the Brain), Department of Clinical Neurology, Oxford University, Oxford, UK LICENCE FMRIB Software Library, Release 5.0 (c) 2012, The University of Oxford (the "Software") The Software remains the property of the University of Oxford ("the University"). The Software is distributed "AS IS" under this Licence solely for non-commercial use in the hope that it will be useful, but in order that the University as a charitable foundation protects its assets for the benefit of its educational and research purposes, the University makes clear that no condition is made or to be implied, nor is any warranty given or to be implied, as to the accuracy of the Software, or that it will be suitable for any particular purpose or for use under any specific conditions. Furthermore, the University disclaims all responsibility for the use which is made of the Software. It further disclaims any liability for the outcomes arising from using the Software. The Licensee agrees to indemnify the University and hold the University harmless from and against any and all claims, damages and liabilities asserted by third parties (including claims for negligence) which arise directly or indirectly from the use of the Software or the sale of any products based on the Software. No part of the Software may be reproduced, modified, transmitted or transferred in any form or by any means, electronic or mechanical, without the express permission of the University. The permission of the University is not required if the said reproduction, modification, transmission or transference is done without financial return, the conditions of this Licence are imposed upon the receiver of the product, and all original and amended source code is included in any transmitted product. You may be held legally responsible for any copyright infringement that is caused or encouraged by your failure to abide by these terms and conditions. You are not permitted under this Licence to use this Software commercially. Use for which any financial return is received shall be defined as commercial use, and includes (1) integration of all or part of the source code or the Software into a product for sale or license by or on behalf of Licensee to third parties or (2) use of the Software or any derivative of it for research with the final aim of developing software products for sale or license to a third party or (3) use of the Software or any derivative of it for research with the final aim of developing non-software products for sale or license to a third party, or (4) use of the Software to provide any service to an external organisation for which payment is received. If you are interested in using the Software commercially, please contact Isis Innovation Limited ("Isis"), the technology transfer company of the University, to negotiate a licence. Contact details are: [email protected] quoting reference DE/9564. 
*/ #include "diffmodels_utils.h" #include "levenberg_marquardt.cu" #include "options.h" ///////////////////////////////////// ///////////////////////////////////// /// PVM_multi /// ///////////////////////////////////// ///////////////////////////////////// __device__ inline float isoterm_PVM_multi(const int pt,const float* _a,const float* _b, const float *bvals){ return exp(-*_a*log(1+bvals[pt]**_b)); } __device__ inline float isoterm_a_PVM_multi(const int pt,const float* _a,const float* _b, const float *bvals){ return -log(1+bvals[pt]**_b)*exp(-*_a*log(1+bvals[pt]**_b)); } __device__ inline float isoterm_b_PVM_multi(const int pt,const float* _a,const float* _b, const float *bvals){ return -*_a*bvals[pt]/(1+bvals[pt]**_b)*exp(-*_a*log(1+bvals[pt]**_b)); } __device__ inline float anisoterm_PVM_multi(const int pt,const float* _a,const float* _b,const float3 x,const float *bvecs, const float *bvals, const float R, const float invR, const int ndirections,const int Gamma_for_ball_only){ float dp = bvecs[pt]*x.x+bvecs[ndirections+pt]*x.y+bvecs[(2*ndirections)+pt]*x.z; if(Gamma_for_ball_only==1){ return exp(-bvals[pt]**_a**_b*dp*dp); }else if(Gamma_for_ball_only==2){ return exp(-bvals[pt]*3**_a**_b*invR*((1-R)*dp*dp+R)); }else{ return exp(-*_a*log(1+bvals[pt]**_b*(dp*dp))); } } __device__ inline float anisoterm_a_PVM_multi(const int pt,const float* _a,const float* _b,const float3 x,const float *bvecs, const float *bvals, const float R, const float invR, const int ndirections,const int Gamma_for_ball_only){ float dp = bvecs[pt]*x.x+bvecs[ndirections+pt]*x.y+bvecs[(2*ndirections)+pt]*x.z; if(Gamma_for_ball_only==1){ return (-bvals[pt]**_b*dp*dp* exp(-bvals[pt]**_a**_b*dp*dp)); }else if(Gamma_for_ball_only==2){ float dp2=bvals[pt]*3**_b*invR*((1-R)*dp*dp+R); return(-dp2*exp(-dp2**_a)); }else{ return -log(1+bvals[pt]*(dp*dp)**_b)* exp(-*_a*log(1+bvals[pt]*(dp*dp)**_b)); } } __device__ inline float anisoterm_b_PVM_multi(const int pt,const float* _a,const float* _b,const float3 x,const float *bvecs, const float *bvals, const float R, const float invR, const int ndirections,const int Gamma_for_ball_only){ float dp = bvecs[pt]*x.x+bvecs[ndirections+pt]*x.y+bvecs[(2*ndirections)+pt]*x.z; if(Gamma_for_ball_only==1){ return(-bvals[pt]**_a*dp*dp*exp(-bvals[pt]**_a**_b*dp*dp)); }else if(Gamma_for_ball_only==2){ float dp2=bvals[pt]*3**_a*invR*((1-R)*dp*dp+R); return(-dp2*exp(-dp2**_b)); }else{ return (-*_a*bvals[pt]*(dp*dp)/ (1+bvals[pt]*(dp*dp)**_b)*exp(-*_a*log(1+bvals[pt]*(dp*dp)**_b))); } } __device__ inline float anisoterm_th_PVM_multi(const int pt,const float* _a,const float* _b,const float3 x,const float _th,const float _ph,const float *bvecs, const float *bvals, const float R, const float invR, const int ndirections,const int Gamma_for_ball_only){ float sinth,costh,sinph,cosph; sincos(_th,&sinth,&costh); sincos(_ph,&sinph,&cosph); float dp = bvecs[pt]*x.x+bvecs[ndirections+pt]*x.y+bvecs[(2*ndirections)+pt]*x.z; float dp1 = costh* (bvecs[pt]*cosph + bvecs[ndirections+pt]*sinph) - bvecs[(2*ndirections)+pt]*sinth; if(Gamma_for_ball_only==1){ return(-2*bvals[pt]**_a**_b*dp*dp1*exp(-bvals[pt]**_a**_b*dp*dp)); }else if(Gamma_for_ball_only==2){ float dp2=2*bvals[pt]*3**_a**_b*invR*(1-R)*dp1; return(-dp2*exp(-bvals[pt]*3**_a**_b*invR*((1-R)*dp*dp+R))); }else{ return (-*_a**_b*bvals[pt]/(1+bvals[pt]*(dp*dp)**_b)*exp(-*_a*log(1+bvals[pt]*(dp*dp)**_b))*2*dp*dp1); } } __device__ inline float anisoterm_ph_PVM_multi(const int pt,const float* _a,const float* _b,const float3 x,const float _th,const float _ph,const 
float *bvecs, const float *bvals, const float R, const float invR, const int ndirections,const int Gamma_for_ball_only){ float sinth,sinph,cosph; sinth=sin(_th); sincos(_ph,&sinph,&cosph); float dp = bvecs[pt]*x.x+bvecs[ndirections+pt]*x.y+bvecs[(2*ndirections)+pt]*x.z; float dp1 = sinth* (-bvecs[pt]*sinph + bvecs[ndirections+pt]*cosph); if(Gamma_for_ball_only==1){ return(-2*bvals[pt]**_a**_b*dp*dp1*exp(-bvals[pt]**_a**_b*dp*dp)); }else if(Gamma_for_ball_only==2){ float dp2=2*bvals[pt]*3**_a**_b*invR*(1-R)*dp1; return(-dp2*exp(-bvals[pt]*3**_a**_b*invR*((1-R)*dp*dp+R))); }else{ return (-*_a**_b*bvals[pt]/(1+bvals[pt]*(dp*dp)**_b)*exp(-*_a*log(1+bvals[pt]*(dp*dp)**_b))*2*dp*dp1); } } //in diffmodel.cc __device__ void fix_fsum_PVM_multi( //INPUT bool m_include_f0, int nfib, int nparams, //INPUT - OUTPUT){ float *params) { float sumf=0; if (m_include_f0) sumf=params[nparams-1]; for(int i=0;i<nfib;i++){ if (params[3+(i*3)]==0) params[3+(i*3)]=FSMALL_gpu; sumf+=params[3+(i*3)]; if(sumf>=1){ for(int j=i;j<nfib;j++) params[3+(j*3)]=FSMALL_gpu; break; } } } //in diffmodel.cc __device__ void sort_PVM_multi(int nfib,float* params) { float temp_f, temp_th, temp_ph; // Order vector descending using f parameters as index for(int i=1; i<(nfib); i++){ for(int j=0; j<(nfib-i); j++){ if (params[3+j*3] < params[3+(j+1)*3]){ temp_f = params[3+j*3]; temp_th = params[3+j*3+1]; temp_ph = params[3+j*3+2]; params[3+j*3] = params[3+(j+1)*3]; params[3+j*3+1] = params[3+(j+1)*3+1]; params[3+j*3+2] = params[3+(j+1)*3+2]; params[3+(j+1)*3] = temp_f; params[3+(j+1)*3+1] = temp_th; params[3+(j+1)*3+2] = temp_ph; } } } } //cost function PVM_multi __device__ void cf_PVM_multi( //INPUT const float* params, const float* mdata, const float* bvecs, const float* bvals, const float R, const float invR, const int ndirections, const int nfib, const int nparams, const bool m_include_f0, const int idSubVOX, const int Gamma_for_ball_only, float* reduction, //shared memory float* fs, //shared memory float* x, //shared memory float* _a, //shared memory float* _b, //shared memory float* sumf, //shared memory //OUTPUT double* cfv) { if(idSubVOX<nfib){ int kk = 3+3*(idSubVOX); float sinth,costh,sinph,cosph; sincos(params[kk+1],&sinth,&costh); sincos(params[kk+2],&sinph,&cosph); fs[idSubVOX] = x2f_gpu(params[kk]); x[idSubVOX*3] = sinth*cosph; x[idSubVOX*3+1] = sinth*sinph; x[idSubVOX*3+2] = costh; } if(idSubVOX==0){ *_a= abs(params[1]); *_b= abs(params[2]); *cfv = 0.0; *sumf=0; for(int k=0;k<nfib;k++) *sumf+= fs[k]; } int ndir = ndirections/THREADS_BLOCK_FIT; if(idSubVOX<(ndirections%THREADS_BLOCK_FIT)) ndir++; float err; float3 x2; int dir_iter=idSubVOX; __syncthreads(); reduction[idSubVOX]=0; for(int dir=0;dir<ndir;dir++){ err = 0.0; for(int k=0;k<nfib;k++){ x2.x=x[k*3]; x2.y=x[k*3+1]; x2.z=x[k*3+2]; err += fs[k]*anisoterm_PVM_multi(dir_iter,_a,_b,x2,bvecs,bvals,R,invR,ndirections,Gamma_for_ball_only); } if(m_include_f0){ float temp_f0=x2f_gpu(params[nparams-1]); err = (abs(params[0])*(temp_f0+((1-*sumf-temp_f0)*isoterm_PVM_multi(dir_iter,_a,_b,bvals)+err)))-mdata[dir_iter]; }else{ err = abs(params[0])*((1-*sumf)*isoterm_PVM_multi(dir_iter,_a,_b,bvals)+err)-mdata[dir_iter]; } reduction[idSubVOX]+= err*err; dir_iter+=THREADS_BLOCK_FIT; } __syncthreads(); if(idSubVOX==0){ for(int i=0;i<THREADS_BLOCK_FIT;i++){ *cfv+=reduction[i]; } } } //gradient function PVM_multi __device__ void grad_PVM_multi( //INPUT const float* params, const float* mdata, const float* bvecs, const float* bvals, const float R, const float invR, const int ndirections, 
const int nfib, const int nparams, const bool m_include_f0, const int idSubVOX, const int Gamma_for_ball_only, float* J, //shared memory float* reduction, //shared memory float* fs, //shared memory float* x, //shared memory float* _a, //shared memory float* _b, //shared memory float* sumf, //shared memory //OUTPUT float* grad) { if(idSubVOX<nfib){ int kk = 3+3*(idSubVOX); float sinth,costh,sinph,cosph; sincos(params[kk+1],&sinth,&costh); sincos(params[kk+2],&sinph,&cosph); fs[idSubVOX] = x2f_gpu(params[kk]); x[idSubVOX*3] = sinth*cosph; x[idSubVOX*3+1] = sinth*sinph; x[idSubVOX*3+2] = costh; } if(idSubVOX==0){ *_a= abs(params[1]); *_b= abs(params[2]); *sumf=0; for(int k=0;k<nfib;k++) *sumf+= fs[k]; for (int p=0;p<nparams;p++) grad[p]=0; } int ndir = ndirections/THREADS_BLOCK_FIT; if(idSubVOX<(ndirections%THREADS_BLOCK_FIT)) ndir++; int max_dir = ndirections/THREADS_BLOCK_FIT; if(ndirections%THREADS_BLOCK_FIT) max_dir++; float* myJ = &J[idSubVOX*nparams]; float diff; float sig; float3 xx; int dir_iter=idSubVOX; __syncthreads(); for(int dir=0;dir<max_dir;dir++){ for (int p=0; p<nparams; p++) myJ[p]=0; if(dir<ndir){ sig = 0; for(int k=0;k<nfib;k++){ int kk = 3+3*(k); xx.x=x[k*3]; xx.y=x[k*3+1]; xx.z=x[k*3+2]; sig += fs[k]*anisoterm_PVM_multi(dir_iter,_a,_b,xx,bvecs,bvals,R,invR,ndirections,Gamma_for_ball_only); myJ[1] += (params[1]>0?1.0:-1.0)*abs(params[0])*fs[k]* anisoterm_a_PVM_multi(dir_iter,_a,_b,xx,bvecs,bvals,R,invR,ndirections,Gamma_for_ball_only); myJ[2] += (params[2]>0?1.0:-1.0)*abs(params[0])*fs[k]* anisoterm_b_PVM_multi(dir_iter,_a,_b,xx,bvecs,bvals,R,invR,ndirections,Gamma_for_ball_only); myJ[kk] = abs(params[0])*(anisoterm_PVM_multi(dir_iter,_a,_b,xx,bvecs,bvals,R,invR,ndirections,Gamma_for_ball_only) -isoterm_PVM_multi(dir_iter,_a,_b,bvals))*two_pi_gpu*sign_gpu(params[kk])*1/(1+params[kk]*params[kk]); myJ[kk+1] = abs(params[0])*fs[k]* anisoterm_th_PVM_multi(dir_iter,_a,_b,xx,params[kk+1],params[kk+2],bvecs,bvals,R,invR,ndirections,Gamma_for_ball_only); myJ[kk+2] = abs(params[0])*fs[k]* anisoterm_ph_PVM_multi(dir_iter,_a,_b,xx,params[kk+1],params[kk+2],bvecs,bvals,R,invR,ndirections,Gamma_for_ball_only); } if(m_include_f0){ float temp_f0=x2f_gpu(params[nparams-1]); myJ[nparams-1]= abs(params[0])*(1-isoterm_PVM_multi(dir_iter,_a,_b,bvals))* two_pi_gpu*sign_gpu(params[nparams-1])*1/(1+params[nparams-1]*params[nparams-1]); sig=abs(params[0])*((temp_f0+(1-*sumf-temp_f0)*isoterm_PVM_multi(dir_iter,_a,_b,bvals))+sig); myJ[1] += (params[1]>0?1.0:-1.0)*abs(params[0])*(1-*sumf-temp_f0)*isoterm_a_PVM_multi(dir_iter,_a,_b,bvals); myJ[2] += (params[2]>0?1.0:-1.0)*abs(params[0])*(1-*sumf-temp_f0)*isoterm_b_PVM_multi(dir_iter,_a,_b,bvals); }else{ sig = abs(params[0]) * ((1-*sumf)*isoterm_PVM_multi(dir_iter,_a,_b,bvals)+sig); myJ[1] += (params[1]>0?1.0:-1.0)*abs(params[0])*(1-*sumf)*isoterm_a_PVM_multi(dir_iter,_a,_b,bvals); myJ[2] += (params[2]>0?1.0:-1.0)*abs(params[0])*(1-*sumf)*isoterm_b_PVM_multi(dir_iter,_a,_b,bvals); } diff = sig - mdata[dir_iter]; myJ[0] = (params[0]>0?1.0:-1.0)*sig/params[0]; } for (int p=0;p<nparams;p++){ reduction[idSubVOX]=2*myJ[p]*diff; __syncthreads(); if(idSubVOX==0){ for(int i=0;i<THREADS_BLOCK_FIT;i++){ grad[p] += reduction[i]; } } __syncthreads(); } dir_iter+=THREADS_BLOCK_FIT; } } //hessian function PVM_multi __device__ void hess_PVM_multi( //INPUT const float* params, const float* bvecs, const float* bvals, const float R, const float invR, const int ndirections, const int nfib, const int nparams, const bool m_include_f0, const int idSubVOX, const int 
Gamma_for_ball_only, float* J, //shared memory float* reduction, //shared memory float* fs, //shared memory float* x, //shared memory float* _a, //shared memory float* _b, //shared memory float* sumf, //shared memory //OUTPUT float* hess) { if(idSubVOX<nfib){ int kk = 3+3*(idSubVOX); float sinth,costh,sinph,cosph; sincos(params[kk+1],&sinth,&costh); sincos(params[kk+2],&sinph,&cosph); fs[idSubVOX] = x2f_gpu(params[kk]); x[idSubVOX*3] = sinth*cosph; x[idSubVOX*3+1] = sinth*sinph; x[idSubVOX*3+2] = costh; } if(idSubVOX==0){ *_a= abs(params[1]); *_b= abs(params[2]); *sumf=0; for(int k=0;k<nfib;k++) *sumf+= fs[k]; for (int p=0;p<nparams;p++){ for (int p2=0;p2<nparams;p2++){ hess[p*nparams+p2] = 0; } } } int ndir = ndirections/THREADS_BLOCK_FIT; if(idSubVOX<(ndirections%THREADS_BLOCK_FIT)) ndir++; int max_dir = ndirections/THREADS_BLOCK_FIT; if(ndirections%THREADS_BLOCK_FIT) max_dir++; float* myJ = &J[idSubVOX*nparams]; float sig; float3 xx; int dir_iter=idSubVOX; __syncthreads(); for(int dir=0;dir<max_dir;dir++){ for (int p=0; p<nparams; p++) myJ[p]=0; if(dir<ndir){ sig = 0; for(int k=0;k<nfib;k++){ int kk = 3+3*(k); xx.x=x[k*3]; xx.y=x[k*3+1]; xx.z=x[k*3+2]; sig += fs[k]*anisoterm_PVM_multi(dir_iter,_a,_b,xx,bvecs,bvals,R,invR,ndirections,Gamma_for_ball_only); float cov = two_pi_gpu*sign_gpu(params[kk])*1/(1+params[kk]*params[kk]); myJ[1] += (params[1]>0?1.0:-1.0)*abs(params[0])*fs[k]* anisoterm_a_PVM_multi(dir_iter,_a,_b,xx,bvecs,bvals,R,invR,ndirections,Gamma_for_ball_only); myJ[2] += (params[2]>0?1.0:-1.0)*abs(params[0])*fs[k]* anisoterm_b_PVM_multi(dir_iter,_a,_b,xx,bvecs,bvals,R,invR,ndirections,Gamma_for_ball_only); myJ[kk] = abs(params[0])* (anisoterm_PVM_multi(dir_iter,_a,_b,xx,bvecs,bvals,R,invR,ndirections,Gamma_for_ball_only)- isoterm_PVM_multi(dir_iter,_a,_b,bvals))*cov; myJ[kk+1] = abs(params[0])*fs[k]* anisoterm_th_PVM_multi(dir_iter,_a,_b,xx,params[kk+1],params[kk+2],bvecs,bvals,R,invR,ndirections,Gamma_for_ball_only); myJ[kk+2] = abs(params[0])*fs[k]* anisoterm_ph_PVM_multi(dir_iter,_a,_b,xx,params[kk+1],params[kk+2],bvecs,bvals,R,invR,ndirections,Gamma_for_ball_only); } if(m_include_f0){ float temp_f0=x2f_gpu(params[nparams-1]); myJ[nparams-1]= abs(params[0])*(1-isoterm_PVM_multi(dir_iter,_a,_b,bvals))*two_pi_gpu*sign_gpu(params[nparams-1])*1/(1+params[nparams-1]*params[nparams-1]); sig = abs(params[0])* (temp_f0+(1-*sumf-temp_f0)*isoterm_PVM_multi(dir_iter,_a,_b,bvals)+sig); myJ[1] += (params[1]>0?1.0:-1.0)*abs(params[0])*(1-*sumf-temp_f0)*isoterm_a_PVM_multi(dir_iter,_a,_b,bvals); myJ[2] += (params[2]>0?1.0:-1.0)*abs(params[0])*(1-*sumf-temp_f0)*isoterm_b_PVM_multi(dir_iter,_a,_b,bvals); }else{ sig = abs(params[0])*((1-*sumf)*isoterm_PVM_multi(dir_iter,_a,_b,bvals)+sig); myJ[1] += (params[1]>0?1.0:-1.0)*abs(params[0])*(1-*sumf)*isoterm_a_PVM_multi(dir_iter,_a,_b,bvals); myJ[2] += (params[2]>0?1.0:-1.0)*abs(params[0])*(1-*sumf)*isoterm_b_PVM_multi(dir_iter,_a,_b,bvals); } myJ[0] = sig/params[0]; } for (int p=0;p<nparams;p++){ for (int p2=p;p2<nparams;p2++){ reduction[idSubVOX]=2*(myJ[p]*myJ[p2]); __syncthreads(); if(idSubVOX==0){ for(int i=0;i<THREADS_BLOCK_FIT;i++){ hess[p*nparams+p2] += reduction[i]; } } __syncthreads(); } } dir_iter+=THREADS_BLOCK_FIT; } if(idSubVOX==0){ for (int j=0; j<nparams; j++) { for (int i=j+1; i<nparams; i++) { hess[i*nparams+j]=hess[j*nparams+i]; } } } } //in diffmodel.cc extern "C" __global__ void fit_PVM_multi_kernel( //INPUT const float* data, const float* params_PVM_single_c, const float* bvecs, const float* bvals, const float R, const float 
invR, const int nvox, const int ndirections, const int nfib, const int nparams, const int Gamma_for_ball_only, const bool m_include_f0, const bool gradnonlin, //OUTPUT float* params) { int idSubVOX = threadIdx.x; int idVOX = blockIdx.x; int threadsBlock = blockDim.x; ////////// DYNAMIC SHARED MEMORY /////////// extern __shared__ double shared[]; double* pcf = (double*) shared; //1 double* ncf = (double*) &pcf[1]; //1 double* lambda = (double*) &ncf[1]; //1 double* cftol = (double*) &lambda[1]; //1 double* ltol = (double*) &cftol[1]; //1 double* olambda = (double*) &ltol[1]; //1 float* J = (float*)&olambda[1]; //threadsBlock*nparams float* reduction = (float*)&J[threadsBlock*nparams]; //threadsBlock float* myparams = (float*) &reduction[threadsBlock]; //nparams float* grad = (float*) &myparams[nparams]; //nparams float* hess = (float*) &grad[nparams]; //nparams*nparams float* step = (float*) &hess[nparams*nparams]; //nparams float* inverse = (float*) &step[nparams]; //nparams float* fs = (float*) &inverse[nparams]; //nfib float* x = (float*) &fs[nfib]; //nfib*3 float* _a = (float*) &x[nfib*3]; //1 float* _b = (float*) &_a[1]; //1 float* sumf = (float*) &_b[1]; //1 float* C = (float*)&sumf[1]; //nparams*nparams; float* el = (float*)&C[nparams*nparams]; //nparams int* indx = (int*)&el[nparams]; //nparams int* success = (int*) &indx[nparams]; //1 int* end = (int*) &success[1]; //1 ////////// DYNAMIC SHARED MEMORY /////////// if(idSubVOX==0){ int nparams_single_c = nparams-1; myparams[0] = params_PVM_single_c[(idVOX*nparams_single_c)+0]; //pvm1.get_s0(); myparams[1] = 1.0; //start with d=d_std for(int i=0,ii=3;i<nfib;i++,ii+=3){ myparams[ii] = f2x_gpu(params_PVM_single_c[(idVOX*nparams_single_c)+ii-1]); myparams[ii+1] = params_PVM_single_c[(idVOX*nparams_single_c)+ii]; myparams[ii+2] = params_PVM_single_c[(idVOX*nparams_single_c)+ii+1]; } myparams[2] = params_PVM_single_c[(idVOX*nparams_single_c)+1] ; //pvm1.get_d(); if (m_include_f0) myparams[nparams-1]=f2x_gpu(params_PVM_single_c[(idVOX*nparams_single_c)+nparams_single_c-1]); } __syncthreads(); int pos_bvals, pos_bvecs; if(gradnonlin){ pos_bvals=idVOX*ndirections; pos_bvecs=idVOX*3*ndirections; }else{ pos_bvals=0; pos_bvecs=0; } //do the fit levenberg_marquardt_PVM_multi_gpu(&data[idVOX*ndirections],&bvecs[pos_bvecs],&bvals[pos_bvals],R,invR, ndirections,nfib,nparams,m_include_f0,idSubVOX,Gamma_for_ball_only, step,grad,hess,inverse, pcf,ncf,lambda,cftol,ltol,olambda,success,end,J, reduction,fs,x,_a,_b,sumf,C,el,indx,myparams); __syncthreads(); // finalise parameters //m_s0-myparams[0] m_d-myparams[1] m_d_std-myparams[2] m_f-m_th-m_ph-myparams[3,4,5,6 etc..] 
m_f0-myparams[nparams-1] if(idSubVOX==0){ float aux = myparams[1]; myparams[1] = abs(aux*myparams[2]); myparams[2] = sqrt(float(abs(aux*myparams[2]*myparams[2]))); for(int i=3,k=0;k<nfib;i+=3,k++){ myparams[i] = x2f_gpu(myparams[i]); } if (m_include_f0) myparams[nparams-1]=x2f_gpu(myparams[nparams-1]); sort_PVM_multi(nfib,myparams); fix_fsum_PVM_multi(m_include_f0,nfib,nparams,myparams); } __syncthreads(); if(idSubVOX<nparams){ params[(idVOX*nparams)+idSubVOX] = myparams[idSubVOX]; } } //in diffmodel.cc extern "C" __global__ void get_residuals_PVM_multi_kernel( //INPUT const float* data, const float* params, const float* bvecs, const float* bvals, const float R, const float invR, const int nvox, const int ndirections, const int nfib, const int nparams, const int Gamma_for_ball_only, const bool m_include_f0, const bool gradnonlin, const bool* includes_f0, //OUTPUT float* residuals) { int idSubVOX = threadIdx.x; int idVOX = blockIdx.x; ////////// DYNAMIC SHARED MEMORY /////////// extern __shared__ double shared[]; float* myparams = (float*) shared; //nparams float* fs = (float*) &myparams[nparams]; //nfib float* x = (float*) &fs[nfib]; //nfib*3 float* _a = (float*) &x[nfib*3]; //1 float* _b = (float*) &_a[1]; //1 float* sumf = (float*) &_b[1]; //1 int* my_include_f0 = (int*) &sumf[1]; //1 ////////// DYNAMIC SHARED MEMORY /////////// float val; float predicted_signal; float mydata; if(idSubVOX==0){ *my_include_f0 = includes_f0[idVOX]; //m_s0-myparams[0] m_d-myparams[1] m_d_std-myparams[2] m_f-m_th-m_ph-myparams[3,4,5,6 etc..] m_f0-myparams[nparams-1] myparams[0] = params[(idVOX*nparams)+0]; float aux1 = params[(idVOX*nparams)+1]; float aux2 = params[(idVOX*nparams)+2]; myparams[1] = aux1*aux1/aux2/aux2; //m_d*m_d/m_d_std/m_d_std; myparams[2] = aux2*aux2/aux1; //m_d_std*m_d_std/m_d; // =1/beta if (*my_include_f0) myparams[nparams-1]=f2x_gpu(params[(idVOX*nparams)+nparams-1]); } if(idSubVOX<nfib){ int kk = 3+3*idSubVOX; float sinth,costh,sinph,cosph; myparams[kk] = f2x_gpu(params[(idVOX*nparams)+kk]); myparams[kk+1] = params[(idVOX*nparams)+kk+1]; myparams[kk+2] = params[(idVOX*nparams)+kk+2]; sincos(myparams[kk+1],&sinth,&costh); sincos(myparams[kk+2],&sinph,&cosph); fs[idSubVOX] = x2f_gpu(myparams[kk]); x[idSubVOX*3] = sinth*cosph; x[idSubVOX*3+1] = sinth*sinph; x[idSubVOX*3+2] = costh; } __syncthreads(); if(idSubVOX==0){ *_a = abs(myparams[1]); *_b = abs(myparams[2]); *sumf=0; for(int k=0;k<nfib;k++){ *sumf += fs[k]; } } int ndir = ndirections/THREADS_BLOCK_FIT; if(idSubVOX<(ndirections%THREADS_BLOCK_FIT)) ndir++; float3 x2; int dir_iter=idSubVOX; __syncthreads(); int pos_bvals, pos_bvecs; if(gradnonlin){ pos_bvals=idVOX*ndirections; pos_bvecs=idVOX*3*ndirections; }else{ pos_bvals=0; pos_bvecs=0; } for(int dir=0;dir<ndir;dir++){ mydata = data[(idVOX*ndirections)+dir_iter]; predicted_signal=0; //pred = 0; val = 0.0; for(int k=0;k<nfib;k++){ x2.x=x[k*3]; x2.y=x[k*3+1]; x2.z=x[k*3+2]; val += fs[k]*anisoterm_PVM_multi(dir_iter,_a,_b,x2,&bvecs[pos_bvecs],&bvals[pos_bvals],R,invR,ndirections,Gamma_for_ball_only); } if (*my_include_f0){ float temp_f0=x2f_gpu(myparams[nparams-1]); predicted_signal = abs(myparams[0])*(temp_f0+(1-*sumf-temp_f0)*isoterm_PVM_multi(dir_iter,_a,_b,&bvals[pos_bvals])+val); }else{ predicted_signal = abs(myparams[0])*((1-*sumf)*isoterm_PVM_multi(dir_iter,_a,_b,&bvals[pos_bvals])+val); } //residuals=m_data-predicted_signal; residuals[idVOX*ndirections+dir_iter]= mydata - predicted_signal; dir_iter+=THREADS_BLOCK_FIT; } }
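A minimal, self-contained sketch (names and sizes are illustrative, not taken from the original kernels) of the dynamic shared-memory layout that fit_PVM_multi_kernel above relies on: one extern __shared__ buffer is carved into typed sub-arrays, doubles first, then floats, then ints, and the host passes the matching byte count as the third launch argument.

#include <cstdio>

__global__ void shared_partition_demo( int nparams )
{
    extern __shared__ double shared[];              // single raw allocation
    double* pcf       = shared;                     // 1 double
    double* lambda    = &pcf[1];                    // 1 double
    float*  reduction = (float*)&lambda[1];         // blockDim.x floats
    float*  myparams  = &reduction[blockDim.x];     // nparams floats
    int*    success   = (int*)&myparams[nparams];   // 1 int

    if( threadIdx.x == 0 ){ *pcf = 0.0; *lambda = 0.1; *success = 1; }
    reduction[threadIdx.x] = (float)threadIdx.x;
    __syncthreads();
    if( threadIdx.x == 0 && blockIdx.x == 0 )
        printf("lambda=%f reduction[0]=%f success=%d\n", *lambda, reduction[0], *success);
    (void)myparams;
}

int main()
{
    const int nparams = 8, threads = 64;
    // the byte count must match the layout carved out inside the kernel
    size_t shmem = 2*sizeof(double) + (threads + nparams)*sizeof(float) + sizeof(int);
    shared_partition_demo<<< 1, threads, shmem >>>( nparams );
    cudaDeviceSynchronize();
    return 0;
}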
8c8dc39be7c22bf665bee9777ae63986abaadfb9.cu
/* PVM_multi.cu Tim Behrens, Saad Jbabdi, Stam Sotiropoulos, Moises Hernandez - FMRIB Image Analysis Group Copyright (C) 2005 University of Oxford */ /* Part of FSL - FMRIB's Software Library http://www.fmrib.ox.ac.uk/fsl [email protected] Developed at FMRIB (Oxford Centre for Functional Magnetic Resonance Imaging of the Brain), Department of Clinical Neurology, Oxford University, Oxford, UK LICENCE FMRIB Software Library, Release 5.0 (c) 2012, The University of Oxford (the "Software") The Software remains the property of the University of Oxford ("the University"). The Software is distributed "AS IS" under this Licence solely for non-commercial use in the hope that it will be useful, but in order that the University as a charitable foundation protects its assets for the benefit of its educational and research purposes, the University makes clear that no condition is made or to be implied, nor is any warranty given or to be implied, as to the accuracy of the Software, or that it will be suitable for any particular purpose or for use under any specific conditions. Furthermore, the University disclaims all responsibility for the use which is made of the Software. It further disclaims any liability for the outcomes arising from using the Software. The Licensee agrees to indemnify the University and hold the University harmless from and against any and all claims, damages and liabilities asserted by third parties (including claims for negligence) which arise directly or indirectly from the use of the Software or the sale of any products based on the Software. No part of the Software may be reproduced, modified, transmitted or transferred in any form or by any means, electronic or mechanical, without the express permission of the University. The permission of the University is not required if the said reproduction, modification, transmission or transference is done without financial return, the conditions of this Licence are imposed upon the receiver of the product, and all original and amended source code is included in any transmitted product. You may be held legally responsible for any copyright infringement that is caused or encouraged by your failure to abide by these terms and conditions. You are not permitted under this Licence to use this Software commercially. Use for which any financial return is received shall be defined as commercial use, and includes (1) integration of all or part of the source code or the Software into a product for sale or license by or on behalf of Licensee to third parties or (2) use of the Software or any derivative of it for research with the final aim of developing software products for sale or license to a third party or (3) use of the Software or any derivative of it for research with the final aim of developing non-software products for sale or license to a third party, or (4) use of the Software to provide any service to an external organisation for which payment is received. If you are interested in using the Software commercially, please contact Isis Innovation Limited ("Isis"), the technology transfer company of the University, to negotiate a licence. Contact details are: [email protected] quoting reference DE/9564. 
*/ #include "diffmodels_utils.h" #include "levenberg_marquardt.cu" #include "options.h" ///////////////////////////////////// ///////////////////////////////////// /// PVM_multi /// ///////////////////////////////////// ///////////////////////////////////// __device__ inline float isoterm_PVM_multi(const int pt,const float* _a,const float* _b, const float *bvals){ return exp(-*_a*log(1+bvals[pt]**_b)); } __device__ inline float isoterm_a_PVM_multi(const int pt,const float* _a,const float* _b, const float *bvals){ return -log(1+bvals[pt]**_b)*exp(-*_a*log(1+bvals[pt]**_b)); } __device__ inline float isoterm_b_PVM_multi(const int pt,const float* _a,const float* _b, const float *bvals){ return -*_a*bvals[pt]/(1+bvals[pt]**_b)*exp(-*_a*log(1+bvals[pt]**_b)); } __device__ inline float anisoterm_PVM_multi(const int pt,const float* _a,const float* _b,const float3 x,const float *bvecs, const float *bvals, const float R, const float invR, const int ndirections,const int Gamma_for_ball_only){ float dp = bvecs[pt]*x.x+bvecs[ndirections+pt]*x.y+bvecs[(2*ndirections)+pt]*x.z; if(Gamma_for_ball_only==1){ return exp(-bvals[pt]**_a**_b*dp*dp); }else if(Gamma_for_ball_only==2){ return exp(-bvals[pt]*3**_a**_b*invR*((1-R)*dp*dp+R)); }else{ return exp(-*_a*log(1+bvals[pt]**_b*(dp*dp))); } } __device__ inline float anisoterm_a_PVM_multi(const int pt,const float* _a,const float* _b,const float3 x,const float *bvecs, const float *bvals, const float R, const float invR, const int ndirections,const int Gamma_for_ball_only){ float dp = bvecs[pt]*x.x+bvecs[ndirections+pt]*x.y+bvecs[(2*ndirections)+pt]*x.z; if(Gamma_for_ball_only==1){ return (-bvals[pt]**_b*dp*dp* exp(-bvals[pt]**_a**_b*dp*dp)); }else if(Gamma_for_ball_only==2){ float dp2=bvals[pt]*3**_b*invR*((1-R)*dp*dp+R); return(-dp2*exp(-dp2**_a)); }else{ return -log(1+bvals[pt]*(dp*dp)**_b)* exp(-*_a*log(1+bvals[pt]*(dp*dp)**_b)); } } __device__ inline float anisoterm_b_PVM_multi(const int pt,const float* _a,const float* _b,const float3 x,const float *bvecs, const float *bvals, const float R, const float invR, const int ndirections,const int Gamma_for_ball_only){ float dp = bvecs[pt]*x.x+bvecs[ndirections+pt]*x.y+bvecs[(2*ndirections)+pt]*x.z; if(Gamma_for_ball_only==1){ return(-bvals[pt]**_a*dp*dp*exp(-bvals[pt]**_a**_b*dp*dp)); }else if(Gamma_for_ball_only==2){ float dp2=bvals[pt]*3**_a*invR*((1-R)*dp*dp+R); return(-dp2*exp(-dp2**_b)); }else{ return (-*_a*bvals[pt]*(dp*dp)/ (1+bvals[pt]*(dp*dp)**_b)*exp(-*_a*log(1+bvals[pt]*(dp*dp)**_b))); } } __device__ inline float anisoterm_th_PVM_multi(const int pt,const float* _a,const float* _b,const float3 x,const float _th,const float _ph,const float *bvecs, const float *bvals, const float R, const float invR, const int ndirections,const int Gamma_for_ball_only){ float sinth,costh,sinph,cosph; sincos(_th,&sinth,&costh); sincos(_ph,&sinph,&cosph); float dp = bvecs[pt]*x.x+bvecs[ndirections+pt]*x.y+bvecs[(2*ndirections)+pt]*x.z; float dp1 = costh* (bvecs[pt]*cosph + bvecs[ndirections+pt]*sinph) - bvecs[(2*ndirections)+pt]*sinth; if(Gamma_for_ball_only==1){ return(-2*bvals[pt]**_a**_b*dp*dp1*exp(-bvals[pt]**_a**_b*dp*dp)); }else if(Gamma_for_ball_only==2){ float dp2=2*bvals[pt]*3**_a**_b*invR*(1-R)*dp1; return(-dp2*exp(-bvals[pt]*3**_a**_b*invR*((1-R)*dp*dp+R))); }else{ return (-*_a**_b*bvals[pt]/(1+bvals[pt]*(dp*dp)**_b)*exp(-*_a*log(1+bvals[pt]*(dp*dp)**_b))*2*dp*dp1); } } __device__ inline float anisoterm_ph_PVM_multi(const int pt,const float* _a,const float* _b,const float3 x,const float _th,const float _ph,const 
float *bvecs, const float *bvals, const float R, const float invR, const int ndirections,const int Gamma_for_ball_only){ float sinth,sinph,cosph; sinth=sin(_th); sincos(_ph,&sinph,&cosph); float dp = bvecs[pt]*x.x+bvecs[ndirections+pt]*x.y+bvecs[(2*ndirections)+pt]*x.z; float dp1 = sinth* (-bvecs[pt]*sinph + bvecs[ndirections+pt]*cosph); if(Gamma_for_ball_only==1){ return(-2*bvals[pt]**_a**_b*dp*dp1*exp(-bvals[pt]**_a**_b*dp*dp)); }else if(Gamma_for_ball_only==2){ float dp2=2*bvals[pt]*3**_a**_b*invR*(1-R)*dp1; return(-dp2*exp(-bvals[pt]*3**_a**_b*invR*((1-R)*dp*dp+R))); }else{ return (-*_a**_b*bvals[pt]/(1+bvals[pt]*(dp*dp)**_b)*exp(-*_a*log(1+bvals[pt]*(dp*dp)**_b))*2*dp*dp1); } } //in diffmodel.cc __device__ void fix_fsum_PVM_multi( //INPUT bool m_include_f0, int nfib, int nparams, //INPUT - OUTPUT){ float *params) { float sumf=0; if (m_include_f0) sumf=params[nparams-1]; for(int i=0;i<nfib;i++){ if (params[3+(i*3)]==0) params[3+(i*3)]=FSMALL_gpu; sumf+=params[3+(i*3)]; if(sumf>=1){ for(int j=i;j<nfib;j++) params[3+(j*3)]=FSMALL_gpu; break; } } } //in diffmodel.cc __device__ void sort_PVM_multi(int nfib,float* params) { float temp_f, temp_th, temp_ph; // Order vector descending using f parameters as index for(int i=1; i<(nfib); i++){ for(int j=0; j<(nfib-i); j++){ if (params[3+j*3] < params[3+(j+1)*3]){ temp_f = params[3+j*3]; temp_th = params[3+j*3+1]; temp_ph = params[3+j*3+2]; params[3+j*3] = params[3+(j+1)*3]; params[3+j*3+1] = params[3+(j+1)*3+1]; params[3+j*3+2] = params[3+(j+1)*3+2]; params[3+(j+1)*3] = temp_f; params[3+(j+1)*3+1] = temp_th; params[3+(j+1)*3+2] = temp_ph; } } } } //cost function PVM_multi __device__ void cf_PVM_multi( //INPUT const float* params, const float* mdata, const float* bvecs, const float* bvals, const float R, const float invR, const int ndirections, const int nfib, const int nparams, const bool m_include_f0, const int idSubVOX, const int Gamma_for_ball_only, float* reduction, //shared memory float* fs, //shared memory float* x, //shared memory float* _a, //shared memory float* _b, //shared memory float* sumf, //shared memory //OUTPUT double* cfv) { if(idSubVOX<nfib){ int kk = 3+3*(idSubVOX); float sinth,costh,sinph,cosph; sincos(params[kk+1],&sinth,&costh); sincos(params[kk+2],&sinph,&cosph); fs[idSubVOX] = x2f_gpu(params[kk]); x[idSubVOX*3] = sinth*cosph; x[idSubVOX*3+1] = sinth*sinph; x[idSubVOX*3+2] = costh; } if(idSubVOX==0){ *_a= abs(params[1]); *_b= abs(params[2]); *cfv = 0.0; *sumf=0; for(int k=0;k<nfib;k++) *sumf+= fs[k]; } int ndir = ndirections/THREADS_BLOCK_FIT; if(idSubVOX<(ndirections%THREADS_BLOCK_FIT)) ndir++; float err; float3 x2; int dir_iter=idSubVOX; __syncthreads(); reduction[idSubVOX]=0; for(int dir=0;dir<ndir;dir++){ err = 0.0; for(int k=0;k<nfib;k++){ x2.x=x[k*3]; x2.y=x[k*3+1]; x2.z=x[k*3+2]; err += fs[k]*anisoterm_PVM_multi(dir_iter,_a,_b,x2,bvecs,bvals,R,invR,ndirections,Gamma_for_ball_only); } if(m_include_f0){ float temp_f0=x2f_gpu(params[nparams-1]); err = (abs(params[0])*(temp_f0+((1-*sumf-temp_f0)*isoterm_PVM_multi(dir_iter,_a,_b,bvals)+err)))-mdata[dir_iter]; }else{ err = abs(params[0])*((1-*sumf)*isoterm_PVM_multi(dir_iter,_a,_b,bvals)+err)-mdata[dir_iter]; } reduction[idSubVOX]+= err*err; dir_iter+=THREADS_BLOCK_FIT; } __syncthreads(); if(idSubVOX==0){ for(int i=0;i<THREADS_BLOCK_FIT;i++){ *cfv+=reduction[i]; } } } //gradient function PVM_multi __device__ void grad_PVM_multi( //INPUT const float* params, const float* mdata, const float* bvecs, const float* bvals, const float R, const float invR, const int ndirections, 
const int nfib, const int nparams, const bool m_include_f0, const int idSubVOX, const int Gamma_for_ball_only, float* J, //shared memory float* reduction, //shared memory float* fs, //shared memory float* x, //shared memory float* _a, //shared memory float* _b, //shared memory float* sumf, //shared memory //OUTPUT float* grad) { if(idSubVOX<nfib){ int kk = 3+3*(idSubVOX); float sinth,costh,sinph,cosph; sincos(params[kk+1],&sinth,&costh); sincos(params[kk+2],&sinph,&cosph); fs[idSubVOX] = x2f_gpu(params[kk]); x[idSubVOX*3] = sinth*cosph; x[idSubVOX*3+1] = sinth*sinph; x[idSubVOX*3+2] = costh; } if(idSubVOX==0){ *_a= abs(params[1]); *_b= abs(params[2]); *sumf=0; for(int k=0;k<nfib;k++) *sumf+= fs[k]; for (int p=0;p<nparams;p++) grad[p]=0; } int ndir = ndirections/THREADS_BLOCK_FIT; if(idSubVOX<(ndirections%THREADS_BLOCK_FIT)) ndir++; int max_dir = ndirections/THREADS_BLOCK_FIT; if(ndirections%THREADS_BLOCK_FIT) max_dir++; float* myJ = &J[idSubVOX*nparams]; float diff; float sig; float3 xx; int dir_iter=idSubVOX; __syncthreads(); for(int dir=0;dir<max_dir;dir++){ for (int p=0; p<nparams; p++) myJ[p]=0; if(dir<ndir){ sig = 0; for(int k=0;k<nfib;k++){ int kk = 3+3*(k); xx.x=x[k*3]; xx.y=x[k*3+1]; xx.z=x[k*3+2]; sig += fs[k]*anisoterm_PVM_multi(dir_iter,_a,_b,xx,bvecs,bvals,R,invR,ndirections,Gamma_for_ball_only); myJ[1] += (params[1]>0?1.0:-1.0)*abs(params[0])*fs[k]* anisoterm_a_PVM_multi(dir_iter,_a,_b,xx,bvecs,bvals,R,invR,ndirections,Gamma_for_ball_only); myJ[2] += (params[2]>0?1.0:-1.0)*abs(params[0])*fs[k]* anisoterm_b_PVM_multi(dir_iter,_a,_b,xx,bvecs,bvals,R,invR,ndirections,Gamma_for_ball_only); myJ[kk] = abs(params[0])*(anisoterm_PVM_multi(dir_iter,_a,_b,xx,bvecs,bvals,R,invR,ndirections,Gamma_for_ball_only) -isoterm_PVM_multi(dir_iter,_a,_b,bvals))*two_pi_gpu*sign_gpu(params[kk])*1/(1+params[kk]*params[kk]); myJ[kk+1] = abs(params[0])*fs[k]* anisoterm_th_PVM_multi(dir_iter,_a,_b,xx,params[kk+1],params[kk+2],bvecs,bvals,R,invR,ndirections,Gamma_for_ball_only); myJ[kk+2] = abs(params[0])*fs[k]* anisoterm_ph_PVM_multi(dir_iter,_a,_b,xx,params[kk+1],params[kk+2],bvecs,bvals,R,invR,ndirections,Gamma_for_ball_only); } if(m_include_f0){ float temp_f0=x2f_gpu(params[nparams-1]); myJ[nparams-1]= abs(params[0])*(1-isoterm_PVM_multi(dir_iter,_a,_b,bvals))* two_pi_gpu*sign_gpu(params[nparams-1])*1/(1+params[nparams-1]*params[nparams-1]); sig=abs(params[0])*((temp_f0+(1-*sumf-temp_f0)*isoterm_PVM_multi(dir_iter,_a,_b,bvals))+sig); myJ[1] += (params[1]>0?1.0:-1.0)*abs(params[0])*(1-*sumf-temp_f0)*isoterm_a_PVM_multi(dir_iter,_a,_b,bvals); myJ[2] += (params[2]>0?1.0:-1.0)*abs(params[0])*(1-*sumf-temp_f0)*isoterm_b_PVM_multi(dir_iter,_a,_b,bvals); }else{ sig = abs(params[0]) * ((1-*sumf)*isoterm_PVM_multi(dir_iter,_a,_b,bvals)+sig); myJ[1] += (params[1]>0?1.0:-1.0)*abs(params[0])*(1-*sumf)*isoterm_a_PVM_multi(dir_iter,_a,_b,bvals); myJ[2] += (params[2]>0?1.0:-1.0)*abs(params[0])*(1-*sumf)*isoterm_b_PVM_multi(dir_iter,_a,_b,bvals); } diff = sig - mdata[dir_iter]; myJ[0] = (params[0]>0?1.0:-1.0)*sig/params[0]; } for (int p=0;p<nparams;p++){ reduction[idSubVOX]=2*myJ[p]*diff; __syncthreads(); if(idSubVOX==0){ for(int i=0;i<THREADS_BLOCK_FIT;i++){ grad[p] += reduction[i]; } } __syncthreads(); } dir_iter+=THREADS_BLOCK_FIT; } } //hessian function PVM_multi __device__ void hess_PVM_multi( //INPUT const float* params, const float* bvecs, const float* bvals, const float R, const float invR, const int ndirections, const int nfib, const int nparams, const bool m_include_f0, const int idSubVOX, const int 
Gamma_for_ball_only, float* J, //shared memory float* reduction, //shared memory float* fs, //shared memory float* x, //shared memory float* _a, //shared memory float* _b, //shared memory float* sumf, //shared memory //OUTPUT float* hess) { if(idSubVOX<nfib){ int kk = 3+3*(idSubVOX); float sinth,costh,sinph,cosph; sincos(params[kk+1],&sinth,&costh); sincos(params[kk+2],&sinph,&cosph); fs[idSubVOX] = x2f_gpu(params[kk]); x[idSubVOX*3] = sinth*cosph; x[idSubVOX*3+1] = sinth*sinph; x[idSubVOX*3+2] = costh; } if(idSubVOX==0){ *_a= abs(params[1]); *_b= abs(params[2]); *sumf=0; for(int k=0;k<nfib;k++) *sumf+= fs[k]; for (int p=0;p<nparams;p++){ for (int p2=0;p2<nparams;p2++){ hess[p*nparams+p2] = 0; } } } int ndir = ndirections/THREADS_BLOCK_FIT; if(idSubVOX<(ndirections%THREADS_BLOCK_FIT)) ndir++; int max_dir = ndirections/THREADS_BLOCK_FIT; if(ndirections%THREADS_BLOCK_FIT) max_dir++; float* myJ = &J[idSubVOX*nparams]; float sig; float3 xx; int dir_iter=idSubVOX; __syncthreads(); for(int dir=0;dir<max_dir;dir++){ for (int p=0; p<nparams; p++) myJ[p]=0; if(dir<ndir){ sig = 0; for(int k=0;k<nfib;k++){ int kk = 3+3*(k); xx.x=x[k*3]; xx.y=x[k*3+1]; xx.z=x[k*3+2]; sig += fs[k]*anisoterm_PVM_multi(dir_iter,_a,_b,xx,bvecs,bvals,R,invR,ndirections,Gamma_for_ball_only); float cov = two_pi_gpu*sign_gpu(params[kk])*1/(1+params[kk]*params[kk]); myJ[1] += (params[1]>0?1.0:-1.0)*abs(params[0])*fs[k]* anisoterm_a_PVM_multi(dir_iter,_a,_b,xx,bvecs,bvals,R,invR,ndirections,Gamma_for_ball_only); myJ[2] += (params[2]>0?1.0:-1.0)*abs(params[0])*fs[k]* anisoterm_b_PVM_multi(dir_iter,_a,_b,xx,bvecs,bvals,R,invR,ndirections,Gamma_for_ball_only); myJ[kk] = abs(params[0])* (anisoterm_PVM_multi(dir_iter,_a,_b,xx,bvecs,bvals,R,invR,ndirections,Gamma_for_ball_only)- isoterm_PVM_multi(dir_iter,_a,_b,bvals))*cov; myJ[kk+1] = abs(params[0])*fs[k]* anisoterm_th_PVM_multi(dir_iter,_a,_b,xx,params[kk+1],params[kk+2],bvecs,bvals,R,invR,ndirections,Gamma_for_ball_only); myJ[kk+2] = abs(params[0])*fs[k]* anisoterm_ph_PVM_multi(dir_iter,_a,_b,xx,params[kk+1],params[kk+2],bvecs,bvals,R,invR,ndirections,Gamma_for_ball_only); } if(m_include_f0){ float temp_f0=x2f_gpu(params[nparams-1]); myJ[nparams-1]= abs(params[0])*(1-isoterm_PVM_multi(dir_iter,_a,_b,bvals))*two_pi_gpu*sign_gpu(params[nparams-1])*1/(1+params[nparams-1]*params[nparams-1]); sig = abs(params[0])* (temp_f0+(1-*sumf-temp_f0)*isoterm_PVM_multi(dir_iter,_a,_b,bvals)+sig); myJ[1] += (params[1]>0?1.0:-1.0)*abs(params[0])*(1-*sumf-temp_f0)*isoterm_a_PVM_multi(dir_iter,_a,_b,bvals); myJ[2] += (params[2]>0?1.0:-1.0)*abs(params[0])*(1-*sumf-temp_f0)*isoterm_b_PVM_multi(dir_iter,_a,_b,bvals); }else{ sig = abs(params[0])*((1-*sumf)*isoterm_PVM_multi(dir_iter,_a,_b,bvals)+sig); myJ[1] += (params[1]>0?1.0:-1.0)*abs(params[0])*(1-*sumf)*isoterm_a_PVM_multi(dir_iter,_a,_b,bvals); myJ[2] += (params[2]>0?1.0:-1.0)*abs(params[0])*(1-*sumf)*isoterm_b_PVM_multi(dir_iter,_a,_b,bvals); } myJ[0] = sig/params[0]; } for (int p=0;p<nparams;p++){ for (int p2=p;p2<nparams;p2++){ reduction[idSubVOX]=2*(myJ[p]*myJ[p2]); __syncthreads(); if(idSubVOX==0){ for(int i=0;i<THREADS_BLOCK_FIT;i++){ hess[p*nparams+p2] += reduction[i]; } } __syncthreads(); } } dir_iter+=THREADS_BLOCK_FIT; } if(idSubVOX==0){ for (int j=0; j<nparams; j++) { for (int i=j+1; i<nparams; i++) { hess[i*nparams+j]=hess[j*nparams+i]; } } } } //in diffmodel.cc extern "C" __global__ void fit_PVM_multi_kernel( //INPUT const float* data, const float* params_PVM_single_c, const float* bvecs, const float* bvals, const float R, const float 
invR, const int nvox, const int ndirections, const int nfib, const int nparams, const int Gamma_for_ball_only, const bool m_include_f0, const bool gradnonlin, //OUTPUT float* params) { int idSubVOX = threadIdx.x; int idVOX = blockIdx.x; int threadsBlock = blockDim.x; ////////// DYNAMIC SHARED MEMORY /////////// extern __shared__ double shared[]; double* pcf = (double*) shared; //1 double* ncf = (double*) &pcf[1]; //1 double* lambda = (double*) &ncf[1]; //1 double* cftol = (double*) &lambda[1]; //1 double* ltol = (double*) &cftol[1]; //1 double* olambda = (double*) &ltol[1]; //1 float* J = (float*)&olambda[1]; //threadsBlock*nparams float* reduction = (float*)&J[threadsBlock*nparams]; //threadsBlock float* myparams = (float*) &reduction[threadsBlock]; //nparams float* grad = (float*) &myparams[nparams]; //nparams float* hess = (float*) &grad[nparams]; //nparams*nparams float* step = (float*) &hess[nparams*nparams]; //nparams float* inverse = (float*) &step[nparams]; //nparams float* fs = (float*) &inverse[nparams]; //nfib float* x = (float*) &fs[nfib]; //nfib*3 float* _a = (float*) &x[nfib*3]; //1 float* _b = (float*) &_a[1]; //1 float* sumf = (float*) &_b[1]; //1 float* C = (float*)&sumf[1]; //nparams*nparams; float* el = (float*)&C[nparams*nparams]; //nparams int* indx = (int*)&el[nparams]; //nparams int* success = (int*) &indx[nparams]; //1 int* end = (int*) &success[1]; //1 ////////// DYNAMIC SHARED MEMORY /////////// if(idSubVOX==0){ int nparams_single_c = nparams-1; myparams[0] = params_PVM_single_c[(idVOX*nparams_single_c)+0]; //pvm1.get_s0(); myparams[1] = 1.0; //start with d=d_std for(int i=0,ii=3;i<nfib;i++,ii+=3){ myparams[ii] = f2x_gpu(params_PVM_single_c[(idVOX*nparams_single_c)+ii-1]); myparams[ii+1] = params_PVM_single_c[(idVOX*nparams_single_c)+ii]; myparams[ii+2] = params_PVM_single_c[(idVOX*nparams_single_c)+ii+1]; } myparams[2] = params_PVM_single_c[(idVOX*nparams_single_c)+1] ; //pvm1.get_d(); if (m_include_f0) myparams[nparams-1]=f2x_gpu(params_PVM_single_c[(idVOX*nparams_single_c)+nparams_single_c-1]); } __syncthreads(); int pos_bvals, pos_bvecs; if(gradnonlin){ pos_bvals=idVOX*ndirections; pos_bvecs=idVOX*3*ndirections; }else{ pos_bvals=0; pos_bvecs=0; } //do the fit levenberg_marquardt_PVM_multi_gpu(&data[idVOX*ndirections],&bvecs[pos_bvecs],&bvals[pos_bvals],R,invR, ndirections,nfib,nparams,m_include_f0,idSubVOX,Gamma_for_ball_only, step,grad,hess,inverse, pcf,ncf,lambda,cftol,ltol,olambda,success,end,J, reduction,fs,x,_a,_b,sumf,C,el,indx,myparams); __syncthreads(); // finalise parameters //m_s0-myparams[0] m_d-myparams[1] m_d_std-myparams[2] m_f-m_th-m_ph-myparams[3,4,5,6 etc..] 
m_f0-myparams[nparams-1] if(idSubVOX==0){ float aux = myparams[1]; myparams[1] = abs(aux*myparams[2]); myparams[2] = sqrt(float(abs(aux*myparams[2]*myparams[2]))); for(int i=3,k=0;k<nfib;i+=3,k++){ myparams[i] = x2f_gpu(myparams[i]); } if (m_include_f0) myparams[nparams-1]=x2f_gpu(myparams[nparams-1]); sort_PVM_multi(nfib,myparams); fix_fsum_PVM_multi(m_include_f0,nfib,nparams,myparams); } __syncthreads(); if(idSubVOX<nparams){ params[(idVOX*nparams)+idSubVOX] = myparams[idSubVOX]; } } //in diffmodel.cc extern "C" __global__ void get_residuals_PVM_multi_kernel( //INPUT const float* data, const float* params, const float* bvecs, const float* bvals, const float R, const float invR, const int nvox, const int ndirections, const int nfib, const int nparams, const int Gamma_for_ball_only, const bool m_include_f0, const bool gradnonlin, const bool* includes_f0, //OUTPUT float* residuals) { int idSubVOX = threadIdx.x; int idVOX = blockIdx.x; ////////// DYNAMIC SHARED MEMORY /////////// extern __shared__ double shared[]; float* myparams = (float*) shared; //nparams float* fs = (float*) &myparams[nparams]; //nfib float* x = (float*) &fs[nfib]; //nfib*3 float* _a = (float*) &x[nfib*3]; //1 float* _b = (float*) &_a[1]; //1 float* sumf = (float*) &_b[1]; //1 int* my_include_f0 = (int*) &sumf[1]; //1 ////////// DYNAMIC SHARED MEMORY /////////// float val; float predicted_signal; float mydata; if(idSubVOX==0){ *my_include_f0 = includes_f0[idVOX]; //m_s0-myparams[0] m_d-myparams[1] m_d_std-myparams[2] m_f-m_th-m_ph-myparams[3,4,5,6 etc..] m_f0-myparams[nparams-1] myparams[0] = params[(idVOX*nparams)+0]; float aux1 = params[(idVOX*nparams)+1]; float aux2 = params[(idVOX*nparams)+2]; myparams[1] = aux1*aux1/aux2/aux2; //m_d*m_d/m_d_std/m_d_std; myparams[2] = aux2*aux2/aux1; //m_d_std*m_d_std/m_d; // =1/beta if (*my_include_f0) myparams[nparams-1]=f2x_gpu(params[(idVOX*nparams)+nparams-1]); } if(idSubVOX<nfib){ int kk = 3+3*idSubVOX; float sinth,costh,sinph,cosph; myparams[kk] = f2x_gpu(params[(idVOX*nparams)+kk]); myparams[kk+1] = params[(idVOX*nparams)+kk+1]; myparams[kk+2] = params[(idVOX*nparams)+kk+2]; sincos(myparams[kk+1],&sinth,&costh); sincos(myparams[kk+2],&sinph,&cosph); fs[idSubVOX] = x2f_gpu(myparams[kk]); x[idSubVOX*3] = sinth*cosph; x[idSubVOX*3+1] = sinth*sinph; x[idSubVOX*3+2] = costh; } __syncthreads(); if(idSubVOX==0){ *_a = abs(myparams[1]); *_b = abs(myparams[2]); *sumf=0; for(int k=0;k<nfib;k++){ *sumf += fs[k]; } } int ndir = ndirections/THREADS_BLOCK_FIT; if(idSubVOX<(ndirections%THREADS_BLOCK_FIT)) ndir++; float3 x2; int dir_iter=idSubVOX; __syncthreads(); int pos_bvals, pos_bvecs; if(gradnonlin){ pos_bvals=idVOX*ndirections; pos_bvecs=idVOX*3*ndirections; }else{ pos_bvals=0; pos_bvecs=0; } for(int dir=0;dir<ndir;dir++){ mydata = data[(idVOX*ndirections)+dir_iter]; predicted_signal=0; //pred = 0; val = 0.0; for(int k=0;k<nfib;k++){ x2.x=x[k*3]; x2.y=x[k*3+1]; x2.z=x[k*3+2]; val += fs[k]*anisoterm_PVM_multi(dir_iter,_a,_b,x2,&bvecs[pos_bvecs],&bvals[pos_bvals],R,invR,ndirections,Gamma_for_ball_only); } if (*my_include_f0){ float temp_f0=x2f_gpu(myparams[nparams-1]); predicted_signal = abs(myparams[0])*(temp_f0+(1-*sumf-temp_f0)*isoterm_PVM_multi(dir_iter,_a,_b,&bvals[pos_bvals])+val); }else{ predicted_signal = abs(myparams[0])*((1-*sumf)*isoterm_PVM_multi(dir_iter,_a,_b,&bvals[pos_bvals])+val); } //residuals=m_data-predicted_signal; residuals[idVOX*ndirections+dir_iter]= mydata - predicted_signal; dir_iter+=THREADS_BLOCK_FIT; } }
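The .hip entry earlier in this collection and the .cu file above differ mainly in the mechanical rewrites produced by hipify: runtime calls get hip* names and the triple-chevron kernel launch becomes hipLaunchKernelGGL. A minimal sketch of that one transformation follows; the kernel and variable names are hypothetical and not part of either file.

__global__ void scale_kernel( float* x, float a, int n )
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if( i < n ) x[i] *= a;
}

int main()
{
    const int n = 1024;
    float* d_x = 0;
    cudaMalloc( &d_x, n*sizeof(float) );
    cudaMemset( d_x, 0, n*sizeof(float) );

    dim3 block( 256 );
    dim3 grid( (n + block.x - 1)/block.x );

    // CUDA form, as written in the .cu files:
    scale_kernel<<< grid, block, 0, 0 >>>( d_x, 2.0f, n );

    // hipify rewrites the launch above into the HIP form seen in the .hip files:
    //   hipLaunchKernelGGL(( scale_kernel), dim3(grid), dim3(block), 0, 0, d_x, 2.0f, n);

    cudaDeviceSynchronize();
    cudaFree( d_x );
    return 0;
}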
339bbbf4785888dced7852698e09a544177d748b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @author Mark Gates @author Azzam Haidar @precisions normal z -> s d c */ #include "magma_internal.h" #include "batched_kernel_param.h" // To deal with really large matrices, this launchs multiple super blocks, // each with up to 64K-1 x 64K-1 thread blocks, which is up to 4194240 x 4194240 matrix with BLK=64. // CUDA architecture 2.0 limits each grid dimension to 64K-1. // Instances arose for vectors used by sparse matrices with M > 4194240, though N is small. const magma_int_t max_blocks = 65535; // BLK_X and BLK_Y need to be equal for zlaset_q to deal with diag & offdiag // when looping over super blocks. // Formerly, BLK_X and BLK_Y could be different. #define BLK_X 64 #define BLK_Y BLK_X /******************************************************************************/ /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to zlaset, zlacpy, zlag2c, clag2z, zgeadd. */ static __device__ void zlaset_full_device( int m, int n, magmaDoubleComplex offdiag, magmaDoubleComplex diag, magmaDoubleComplex *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (below diag || above diag || offdiag == diag) */ bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y || ind + BLK_X <= iby || MAGMA_Z_EQUAL( offdiag, diag ))); /* do only rows inside matrix */ if ( ind < m ) { A += ind + iby*lda; if ( full ) { // full block-column, off-diagonal block or offdiag == diag #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = offdiag; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( iby+j == ind ) A[j*lda] = diag; else A[j*lda] = offdiag; } } } } /******************************************************************************/ /* Similar to zlaset_full, but updates only the diagonal and below. Blocks that are fully above the diagonal exit immediately. Code similar to zlaset, zlacpy, zlat2c, clat2z. */ static __device__ void zlaset_lower_device( int m, int n, magmaDoubleComplex offdiag, magmaDoubleComplex diag, magmaDoubleComplex *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (below diag) */ bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y)); /* do only rows inside matrix, and blocks not above diag */ if ( ind < m && ind + BLK_X > iby ) { A += ind + iby*lda; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = offdiag; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( iby+j == ind ) A[j*lda] = diag; else if ( ind > iby+j ) A[j*lda] = offdiag; } } } } /******************************************************************************/ /* Similar to zlaset_full, but updates only the diagonal and above. Blocks that are fully below the diagonal exit immediately. Code similar to zlaset, zlacpy, zlat2c, clat2z. 
*/ static __device__ void zlaset_upper_device( int m, int n, magmaDoubleComplex offdiag, magmaDoubleComplex diag, magmaDoubleComplex *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (above diag) */ bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby)); /* do only rows inside matrix, and blocks not below diag */ if ( ind < m && ind < iby + BLK_Y ) { A += ind + iby*lda; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = offdiag; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( iby+j == ind ) A[j*lda] = diag; else if ( ind < iby+j ) A[j*lda] = offdiag; } } } } /******************************************************************************/ /* kernel wrappers to call the device functions. */ __global__ void zlaset_full_kernel( int m, int n, magmaDoubleComplex offdiag, magmaDoubleComplex diag, magmaDoubleComplex *dA, int ldda ) { zlaset_full_device(m, n, offdiag, diag, dA, ldda); } __global__ void zlaset_lower_kernel( int m, int n, magmaDoubleComplex offdiag, magmaDoubleComplex diag, magmaDoubleComplex *dA, int ldda ) { zlaset_lower_device(m, n, offdiag, diag, dA, ldda); } __global__ void zlaset_upper_kernel( int m, int n, magmaDoubleComplex offdiag, magmaDoubleComplex diag, magmaDoubleComplex *dA, int ldda ) { zlaset_upper_device(m, n, offdiag, diag, dA, ldda); } /******************************************************************************/ /* kernel wrappers to call the device functions for the batched routine. */ __global__ void zlaset_full_kernel_batched( int m, int n, magmaDoubleComplex offdiag, magmaDoubleComplex diag, magmaDoubleComplex **dAarray, int ldda ) { int batchid = blockIdx.z; zlaset_full_device(m, n, offdiag, diag, dAarray[batchid], ldda); } __global__ void zlaset_lower_kernel_batched( int m, int n, magmaDoubleComplex offdiag, magmaDoubleComplex diag, magmaDoubleComplex **dAarray, int ldda ) { int batchid = blockIdx.z; zlaset_lower_device(m, n, offdiag, diag, dAarray[batchid], ldda); } __global__ void zlaset_upper_kernel_batched( int m, int n, magmaDoubleComplex offdiag, magmaDoubleComplex diag, magmaDoubleComplex **dAarray, int ldda ) { int batchid = blockIdx.z; zlaset_upper_device(m, n, offdiag, diag, dAarray[batchid], ldda); } /******************************************************************************/ /* kernel wrappers to call the device functions for the vbatched routine. 
*/ __global__ void zlaset_full_kernel_vbatched( magma_int_t* m, magma_int_t* n, magmaDoubleComplex offdiag, magmaDoubleComplex diag, magmaDoubleComplex **dAarray, magma_int_t* ldda ) { const int batchid = blockIdx.z; const int my_m = (int)m[batchid]; const int my_n = (int)n[batchid]; if( blockIdx.x >= (my_m+BLK_X-1)/BLK_X ) return; if( blockIdx.y >= (my_n+BLK_Y-1)/BLK_Y ) return; zlaset_full_device(my_m, my_n, offdiag, diag, dAarray[batchid], (int)ldda[batchid]); } __global__ void zlaset_lower_kernel_vbatched( magma_int_t* m, magma_int_t* n, magmaDoubleComplex offdiag, magmaDoubleComplex diag, magmaDoubleComplex **dAarray, magma_int_t* ldda ) { const int batchid = blockIdx.z; const int my_m = (int)m[batchid]; const int my_n = (int)n[batchid]; if( blockIdx.x >= (my_m+BLK_X-1)/BLK_X ) return; if( blockIdx.y >= (my_n+BLK_Y-1)/BLK_Y ) return; zlaset_lower_device(my_m, my_n, offdiag, diag, dAarray[batchid], (int)ldda[batchid]); } __global__ void zlaset_upper_kernel_vbatched( magma_int_t* m, magma_int_t* n, magmaDoubleComplex offdiag, magmaDoubleComplex diag, magmaDoubleComplex **dAarray, magma_int_t* ldda ) { const int batchid = blockIdx.z; const int my_m = (int)m[batchid]; const int my_n = (int)n[batchid]; if( blockIdx.x >= (my_m+BLK_X-1)/BLK_X ) return; if( blockIdx.y >= (my_n+BLK_Y-1)/BLK_Y ) return; zlaset_upper_device(my_m, my_n, offdiag, diag, dAarray[batchid], (int)ldda[batchid]); } /***************************************************************************//** Purpose ------- ZLASET initializes a 2-D array A to DIAG on the diagonal and OFFDIAG on the off-diagonals. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA to be set. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part - = MagmaFull: All of the matrix dA @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] offdiag COMPLEX_16 The scalar OFFDIAG. (In LAPACK this is called ALPHA.) @param[in] diag COMPLEX_16 The scalar DIAG. (In LAPACK this is called BETA.) @param[in] dA COMPLEX_16 array, dimension (LDDA,N) The M-by-N matrix dA. If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed; if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed. On exit, A(i,j) = OFFDIAG, 1 <= i <= m, 1 <= j <= n, i != j; and A(i,i) = DIAG, 1 <= i <= min(m,n) @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_laset *******************************************************************************/ extern "C" void magmablas_zlaset( magma_uplo_t uplo, magma_int_t m, magma_int_t n, magmaDoubleComplex offdiag, magmaDoubleComplex diag, magmaDoubleComplex_ptr dA, magma_int_t ldda, magma_queue_t queue) { #define dA(i_, j_) (dA + (i_) + (j_)*ldda) magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m) ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { return; } assert( BLK_X == BLK_Y ); const magma_int_t super_NB = max_blocks*BLK_X; dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) ); dim3 threads( BLK_X, 1 ); dim3 grid; magma_int_t mm, nn; if (uplo == MagmaLower) { for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? 
m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=0; j < super_grid.y && j <= i; ++j ) { // from left to diagonal nn = (j == super_grid.y-1 ? n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); if ( i == j ) { // diagonal super block hipLaunchKernelGGL(( zlaset_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda ); } else { // off diagonal super block hipLaunchKernelGGL(( zlaset_full_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda ); } } } } else if (uplo == MagmaUpper) { for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=i; j < super_grid.y; ++j ) { // from diagonal to right nn = (j == super_grid.y-1 ? n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); if ( i == j ) { // diagonal super block hipLaunchKernelGGL(( zlaset_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda ); } else { // off diagonal super block hipLaunchKernelGGL(( zlaset_full_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda ); } } } } else { // if continuous in memory & set to zero, hipMemset is faster. // TODO: use hipMemset2D ? if ( m == ldda && MAGMA_Z_EQUAL( offdiag, MAGMA_Z_ZERO ) && MAGMA_Z_EQUAL( diag, MAGMA_Z_ZERO ) ) { size_t size = m*n; hipError_t err = hipMemsetAsync( dA, 0, size*sizeof(magmaDoubleComplex), queue->cuda_stream() ); assert( err == hipSuccess ); MAGMA_UNUSED( err ); } else { for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=0; j < super_grid.y; ++j ) { // full row nn = (j == super_grid.y-1 ? 
n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); if ( i == j ) { // diagonal super block hipLaunchKernelGGL(( zlaset_full_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda ); } else { // off diagonal super block hipLaunchKernelGGL(( zlaset_full_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda ); } } } } } } /******************************************************************************/ extern "C" void magmablas_zlaset_batched( magma_uplo_t uplo, magma_int_t m, magma_int_t n, magmaDoubleComplex offdiag, magmaDoubleComplex diag, magmaDoubleComplex_ptr dAarray[], magma_int_t ldda, magma_int_t batchCount, magma_queue_t queue) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m) ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { return; } dim3 threads( BLK_X, 1, 1 ); magma_int_t max_batchCount = queue->get_maxBatch(); for(magma_int_t i = 0; i < batchCount; i+=max_batchCount) { magma_int_t ibatch = min(max_batchCount, batchCount-i); dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ), ibatch ); if (uplo == MagmaLower) { hipLaunchKernelGGL(( zlaset_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dAarray+i, ldda); } else if (uplo == MagmaUpper) { hipLaunchKernelGGL(( zlaset_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dAarray+i, ldda); } else { hipLaunchKernelGGL(( zlaset_full_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dAarray+i, ldda); } } } /******************************************************************************/ extern "C" void magmablas_zlaset_vbatched( magma_uplo_t uplo, magma_int_t max_m, magma_int_t max_n, magma_int_t* m, magma_int_t* n, magmaDoubleComplex offdiag, magmaDoubleComplex diag, magmaDoubleComplex_ptr dAarray[], magma_int_t* ldda, magma_int_t batchCount, magma_queue_t queue) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( max_m < 0 ) info = -2; else if ( max_n < 0 ) info = -3; //else if ( ldda < max(1,m) ) // info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if ( max_m == 0 || max_n == 0 ) { return; } dim3 threads( BLK_X, 1, 1 ); magma_int_t max_batchCount = queue->get_maxBatch(); for(magma_int_t i = 0; i < batchCount; i+=max_batchCount) { magma_int_t ibatch = min(max_batchCount, batchCount-i); dim3 grid( magma_ceildiv( max_m, BLK_X ), magma_ceildiv( max_n, BLK_Y ), ibatch ); if (uplo == MagmaLower) { hipLaunchKernelGGL(( zlaset_lower_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m+i, n+i, offdiag, diag, dAarray+i, ldda+i); } else if (uplo == MagmaUpper) { hipLaunchKernelGGL(( zlaset_upper_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m+i, n+i, offdiag, diag, dAarray+i, ldda+i); } else { hipLaunchKernelGGL(( zlaset_full_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m+i, n+i, offdiag, diag, dAarray+i, ldda+i); } } }
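A standalone sketch (not MAGMA code; every identifier here is illustrative) of the super-block loop described in the header comment above: the matrix is walked in chunks of at most 65535*BLK rows and columns, so each individual launch keeps both grid dimensions below the 64K-1 hardware limit.

#include <algorithm>

#define BLK 64
static const long long MAX_BLOCKS = 65535;

__global__ void set_value( float* A, int m, int n, long long lda, float v )
{
    int i   = blockIdx.x*BLK + threadIdx.x;   // row handled by this thread
    int jby = blockIdx.y*BLK;                 // first column handled by this block
    if ( i < m ) {
        for( int j = 0; j < BLK && jby+j < n; ++j )
            A[ i + (long long)(jby+j)*lda ] = v;
    }
}

static unsigned int ceildiv( long long a, long long b ) { return (unsigned int)((a + b - 1)/b); }

void set_matrix( float* dA, long long m, long long n, long long ldda, float v )
{
    const long long super_NB = MAX_BLOCKS*BLK;
    dim3 threads( BLK );
    for( long long ii = 0; ii < m; ii += super_NB ) {
        long long mm = std::min( super_NB, m - ii );
        for( long long jj = 0; jj < n; jj += super_NB ) {
            long long nn = std::min( super_NB, n - jj );
            dim3 grid( ceildiv( mm, BLK ), ceildiv( nn, BLK ) );   // both dims <= 65535
            set_value<<< grid, threads >>>( dA + ii + jj*ldda, (int)mm, (int)nn, ldda, v );
        }
    }
}

int main()
{
    const long long m = 2000, n = 1500, ldda = m;
    float* dA = 0;
    cudaMalloc( &dA, (size_t)(ldda*n)*sizeof(float) );
    set_matrix( dA, m, n, ldda, 1.0f );
    cudaDeviceSynchronize();
    cudaFree( dA );
    return 0;
}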
339bbbf4785888dced7852698e09a544177d748b.cu
/* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @author Mark Gates @author Azzam Haidar @precisions normal z -> s d c */ #include "magma_internal.h" #include "batched_kernel_param.h" // To deal with really large matrices, this launchs multiple super blocks, // each with up to 64K-1 x 64K-1 thread blocks, which is up to 4194240 x 4194240 matrix with BLK=64. // CUDA architecture 2.0 limits each grid dimension to 64K-1. // Instances arose for vectors used by sparse matrices with M > 4194240, though N is small. const magma_int_t max_blocks = 65535; // BLK_X and BLK_Y need to be equal for zlaset_q to deal with diag & offdiag // when looping over super blocks. // Formerly, BLK_X and BLK_Y could be different. #define BLK_X 64 #define BLK_Y BLK_X /******************************************************************************/ /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to zlaset, zlacpy, zlag2c, clag2z, zgeadd. */ static __device__ void zlaset_full_device( int m, int n, magmaDoubleComplex offdiag, magmaDoubleComplex diag, magmaDoubleComplex *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (below diag || above diag || offdiag == diag) */ bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y || ind + BLK_X <= iby || MAGMA_Z_EQUAL( offdiag, diag ))); /* do only rows inside matrix */ if ( ind < m ) { A += ind + iby*lda; if ( full ) { // full block-column, off-diagonal block or offdiag == diag #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = offdiag; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( iby+j == ind ) A[j*lda] = diag; else A[j*lda] = offdiag; } } } } /******************************************************************************/ /* Similar to zlaset_full, but updates only the diagonal and below. Blocks that are fully above the diagonal exit immediately. Code similar to zlaset, zlacpy, zlat2c, clat2z. */ static __device__ void zlaset_lower_device( int m, int n, magmaDoubleComplex offdiag, magmaDoubleComplex diag, magmaDoubleComplex *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (below diag) */ bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y)); /* do only rows inside matrix, and blocks not above diag */ if ( ind < m && ind + BLK_X > iby ) { A += ind + iby*lda; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = offdiag; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( iby+j == ind ) A[j*lda] = diag; else if ( ind > iby+j ) A[j*lda] = offdiag; } } } } /******************************************************************************/ /* Similar to zlaset_full, but updates only the diagonal and above. Blocks that are fully below the diagonal exit immediately. Code similar to zlaset, zlacpy, zlat2c, clat2z. 
*/ static __device__ void zlaset_upper_device( int m, int n, magmaDoubleComplex offdiag, magmaDoubleComplex diag, magmaDoubleComplex *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (above diag) */ bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby)); /* do only rows inside matrix, and blocks not below diag */ if ( ind < m && ind < iby + BLK_Y ) { A += ind + iby*lda; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = offdiag; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( iby+j == ind ) A[j*lda] = diag; else if ( ind < iby+j ) A[j*lda] = offdiag; } } } } /******************************************************************************/ /* kernel wrappers to call the device functions. */ __global__ void zlaset_full_kernel( int m, int n, magmaDoubleComplex offdiag, magmaDoubleComplex diag, magmaDoubleComplex *dA, int ldda ) { zlaset_full_device(m, n, offdiag, diag, dA, ldda); } __global__ void zlaset_lower_kernel( int m, int n, magmaDoubleComplex offdiag, magmaDoubleComplex diag, magmaDoubleComplex *dA, int ldda ) { zlaset_lower_device(m, n, offdiag, diag, dA, ldda); } __global__ void zlaset_upper_kernel( int m, int n, magmaDoubleComplex offdiag, magmaDoubleComplex diag, magmaDoubleComplex *dA, int ldda ) { zlaset_upper_device(m, n, offdiag, diag, dA, ldda); } /******************************************************************************/ /* kernel wrappers to call the device functions for the batched routine. */ __global__ void zlaset_full_kernel_batched( int m, int n, magmaDoubleComplex offdiag, magmaDoubleComplex diag, magmaDoubleComplex **dAarray, int ldda ) { int batchid = blockIdx.z; zlaset_full_device(m, n, offdiag, diag, dAarray[batchid], ldda); } __global__ void zlaset_lower_kernel_batched( int m, int n, magmaDoubleComplex offdiag, magmaDoubleComplex diag, magmaDoubleComplex **dAarray, int ldda ) { int batchid = blockIdx.z; zlaset_lower_device(m, n, offdiag, diag, dAarray[batchid], ldda); } __global__ void zlaset_upper_kernel_batched( int m, int n, magmaDoubleComplex offdiag, magmaDoubleComplex diag, magmaDoubleComplex **dAarray, int ldda ) { int batchid = blockIdx.z; zlaset_upper_device(m, n, offdiag, diag, dAarray[batchid], ldda); } /******************************************************************************/ /* kernel wrappers to call the device functions for the vbatched routine. 
*/ __global__ void zlaset_full_kernel_vbatched( magma_int_t* m, magma_int_t* n, magmaDoubleComplex offdiag, magmaDoubleComplex diag, magmaDoubleComplex **dAarray, magma_int_t* ldda ) { const int batchid = blockIdx.z; const int my_m = (int)m[batchid]; const int my_n = (int)n[batchid]; if( blockIdx.x >= (my_m+BLK_X-1)/BLK_X ) return; if( blockIdx.y >= (my_n+BLK_Y-1)/BLK_Y ) return; zlaset_full_device(my_m, my_n, offdiag, diag, dAarray[batchid], (int)ldda[batchid]); } __global__ void zlaset_lower_kernel_vbatched( magma_int_t* m, magma_int_t* n, magmaDoubleComplex offdiag, magmaDoubleComplex diag, magmaDoubleComplex **dAarray, magma_int_t* ldda ) { const int batchid = blockIdx.z; const int my_m = (int)m[batchid]; const int my_n = (int)n[batchid]; if( blockIdx.x >= (my_m+BLK_X-1)/BLK_X ) return; if( blockIdx.y >= (my_n+BLK_Y-1)/BLK_Y ) return; zlaset_lower_device(my_m, my_n, offdiag, diag, dAarray[batchid], (int)ldda[batchid]); } __global__ void zlaset_upper_kernel_vbatched( magma_int_t* m, magma_int_t* n, magmaDoubleComplex offdiag, magmaDoubleComplex diag, magmaDoubleComplex **dAarray, magma_int_t* ldda ) { const int batchid = blockIdx.z; const int my_m = (int)m[batchid]; const int my_n = (int)n[batchid]; if( blockIdx.x >= (my_m+BLK_X-1)/BLK_X ) return; if( blockIdx.y >= (my_n+BLK_Y-1)/BLK_Y ) return; zlaset_upper_device(my_m, my_n, offdiag, diag, dAarray[batchid], (int)ldda[batchid]); } /***************************************************************************//** Purpose ------- ZLASET initializes a 2-D array A to DIAG on the diagonal and OFFDIAG on the off-diagonals. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA to be set. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part - = MagmaFull: All of the matrix dA @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] offdiag COMPLEX_16 The scalar OFFDIAG. (In LAPACK this is called ALPHA.) @param[in] diag COMPLEX_16 The scalar DIAG. (In LAPACK this is called BETA.) @param[in] dA COMPLEX_16 array, dimension (LDDA,N) The M-by-N matrix dA. If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed; if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed. On exit, A(i,j) = OFFDIAG, 1 <= i <= m, 1 <= j <= n, i != j; and A(i,i) = DIAG, 1 <= i <= min(m,n) @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_laset *******************************************************************************/ extern "C" void magmablas_zlaset( magma_uplo_t uplo, magma_int_t m, magma_int_t n, magmaDoubleComplex offdiag, magmaDoubleComplex diag, magmaDoubleComplex_ptr dA, magma_int_t ldda, magma_queue_t queue) { #define dA(i_, j_) (dA + (i_) + (j_)*ldda) magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m) ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { return; } assert( BLK_X == BLK_Y ); const magma_int_t super_NB = max_blocks*BLK_X; dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) ); dim3 threads( BLK_X, 1 ); dim3 grid; magma_int_t mm, nn; if (uplo == MagmaLower) { for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? 
m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=0; j < super_grid.y && j <= i; ++j ) { // from left to diagonal nn = (j == super_grid.y-1 ? n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); if ( i == j ) { // diagonal super block zlaset_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda ); } else { // off diagonal super block zlaset_full_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda ); } } } } else if (uplo == MagmaUpper) { for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=i; j < super_grid.y; ++j ) { // from diagonal to right nn = (j == super_grid.y-1 ? n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); if ( i == j ) { // diagonal super block zlaset_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda ); } else { // off diagonal super block zlaset_full_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda ); } } } } else { // if continuous in memory & set to zero, cudaMemset is faster. // TODO: use cudaMemset2D ? if ( m == ldda && MAGMA_Z_EQUAL( offdiag, MAGMA_Z_ZERO ) && MAGMA_Z_EQUAL( diag, MAGMA_Z_ZERO ) ) { size_t size = m*n; cudaError_t err = cudaMemsetAsync( dA, 0, size*sizeof(magmaDoubleComplex), queue->cuda_stream() ); assert( err == cudaSuccess ); MAGMA_UNUSED( err ); } else { for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=0; j < super_grid.y; ++j ) { // full row nn = (j == super_grid.y-1 ? 
n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); if ( i == j ) { // diagonal super block zlaset_full_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda ); } else { // off diagonal super block zlaset_full_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda ); } } } } } } /******************************************************************************/ extern "C" void magmablas_zlaset_batched( magma_uplo_t uplo, magma_int_t m, magma_int_t n, magmaDoubleComplex offdiag, magmaDoubleComplex diag, magmaDoubleComplex_ptr dAarray[], magma_int_t ldda, magma_int_t batchCount, magma_queue_t queue) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m) ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { return; } dim3 threads( BLK_X, 1, 1 ); magma_int_t max_batchCount = queue->get_maxBatch(); for(magma_int_t i = 0; i < batchCount; i+=max_batchCount) { magma_int_t ibatch = min(max_batchCount, batchCount-i); dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ), ibatch ); if (uplo == MagmaLower) { zlaset_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dAarray+i, ldda); } else if (uplo == MagmaUpper) { zlaset_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dAarray+i, ldda); } else { zlaset_full_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dAarray+i, ldda); } } } /******************************************************************************/ extern "C" void magmablas_zlaset_vbatched( magma_uplo_t uplo, magma_int_t max_m, magma_int_t max_n, magma_int_t* m, magma_int_t* n, magmaDoubleComplex offdiag, magmaDoubleComplex diag, magmaDoubleComplex_ptr dAarray[], magma_int_t* ldda, magma_int_t batchCount, magma_queue_t queue) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( max_m < 0 ) info = -2; else if ( max_n < 0 ) info = -3; //else if ( ldda < max(1,m) ) // info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if ( max_m == 0 || max_n == 0 ) { return; } dim3 threads( BLK_X, 1, 1 ); magma_int_t max_batchCount = queue->get_maxBatch(); for(magma_int_t i = 0; i < batchCount; i+=max_batchCount) { magma_int_t ibatch = min(max_batchCount, batchCount-i); dim3 grid( magma_ceildiv( max_m, BLK_X ), magma_ceildiv( max_n, BLK_Y ), ibatch ); if (uplo == MagmaLower) { zlaset_lower_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>> (m+i, n+i, offdiag, diag, dAarray+i, ldda+i); } else if (uplo == MagmaUpper) { zlaset_upper_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>> (m+i, n+i, offdiag, diag, dAarray+i, ldda+i); } else { zlaset_full_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>> (m+i, n+i, offdiag, diag, dAarray+i, ldda+i); } } }
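A minimal sketch of the vbatched launch pattern used by the *_kernel_vbatched wrappers above: the grid is sized for the largest matrix in the batch, per-matrix dimensions arrive as device arrays, and each block returns early when it falls outside its own matrix. All identifiers below are hypothetical.

#define TILE 64

__global__ void fill_vbatched( const int* m, const int* n, float v,
                               float* const* dAarray, const int* ldda )
{
    const int batchid = blockIdx.z;
    const int my_m = m[batchid];
    const int my_n = n[batchid];
    // blocks beyond this matrix's own grid exit immediately
    if( blockIdx.x >= (my_m + TILE - 1)/TILE ) return;
    if( blockIdx.y >= (my_n + TILE - 1)/TILE ) return;

    float* A  = dAarray[batchid];
    int   i   = blockIdx.x*TILE + threadIdx.x;
    int   jby = blockIdx.y*TILE;
    if( i < my_m ) {
        for( int j = 0; j < TILE && jby+j < my_n; ++j )
            A[ i + (size_t)(jby+j)*ldda[batchid] ] = v;
    }
}

// host side: one launch covers the whole (sub-)batch
void fill_all( int max_m, int max_n, int batchCount,
               const int* d_m, const int* d_n,
               float* const* d_Aarray, const int* d_ldda )
{
    dim3 threads( TILE, 1, 1 );
    dim3 grid( (max_m + TILE - 1)/TILE, (max_n + TILE - 1)/TILE, batchCount );
    fill_vbatched<<< grid, threads >>>( d_m, d_n, 2.0f, d_Aarray, d_ldda );
}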
9e1f97df0a884a91a17be9ab29565a207e2ad69b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "KNNBinDataV2.cuh" #include "CUDAGlobals.cuh" using namespace OpenSteer; extern "C" { // Bind the textures to the input hipArray. __host__ void KNNBinningV2BindTexture( hipArray * pCudaArray ); // Unbind the textures. __host__ void KNNBinningV2UnbindTexture( void ); // Use to precompute the neighbors of each cell once per decomposition. __global__ void KNNBinningV2ComputeCellNeighbors2D( bin_cell const* pdCells, // In: Cell data. uint * pdCellNeighbors, // Out: Array of computed cell neighbors. size_t const neighborsPerCell, // In: Number of neighbors per cell. int const radius, // In: Search radius. size_t const numCells // In: Number of cells. ); __global__ void KNNBinningV2ComputeCellNeighbors3D( bin_cell const* pdCells, // In: Cell data. uint * pdCellNeighbors, // Out: Array of computed cell neighbors. size_t const neighborsPerCell, // In: Number of neighbors per cell. uint const radius, // In: Search radius. size_t const numCells // In: Number of cells. ); } KNNBinDataV2::KNNBinDataV2( uint3 const& worldCells, float3 const& worldSize, uint const searchRadius ) : m_worldCells( worldCells ), m_worldSize( worldSize ), m_nSearchRadius( searchRadius ) { m_nCells = m_worldCells.x * m_worldCells.y * m_worldCells.z; // Create the cells. CreateCells(); // Compute the neighbors for the cells. ComputeCellNeighbors( m_worldCells.y > 1 ); } void KNNBinDataV2::ComputeCellNeighbors( bool b3D ) { dim3 grid = gridDim(); dim3 block = blockDim(); m_nNeighborsPerCell = ipow( (m_nSearchRadius * 2 + 1), (b3D ? 3 : 2) ); size_t const shMemSize = sizeof(uint) * KNN_THREADSPERBLOCK * m_nNeighborsPerCell; // Allocate enough device memory. m_dvCellNeighbors.resize( m_nCells * m_nNeighborsPerCell ); // Bind the texture. KNNBinningV2BindTexture( pdCellIndexArray() ); if( b3D ) { hipLaunchKernelGGL(( KNNBinningV2ComputeCellNeighbors3D), dim3(grid), dim3(block), shMemSize , 0, pdCells(), pdCellNeighbors(), m_nNeighborsPerCell, m_nSearchRadius, m_nCells ); } else { hipLaunchKernelGGL(( KNNBinningV2ComputeCellNeighbors2D), dim3(grid), dim3(block), shMemSize , 0, pdCells(), pdCellNeighbors(), m_nNeighborsPerCell, m_nSearchRadius, m_nCells ); } cutilCheckMsg( "KNNBinningComputeCellNeighbors failed." ); //CUDA_SAFE_CALL( hipDeviceSynchronize() ); // Unbind the texture. KNNBinningV2UnbindTexture(); } void KNNBinDataV2::CreateCells( void ) { float3 const step = make_float3( m_worldSize.x / m_worldCells.x, // width m_worldSize.y / m_worldCells.y, // height m_worldSize.z / m_worldCells.z ); // depth float3 const stepNormalized = make_float3( step.x / m_worldSize.x, step.y / m_worldSize.y, step.z / m_worldSize.z ); /* Texture addressing in CUDA operates as follows. z| | y/ | / | / | / | / |/_________x */ size_t const numCells = m_worldCells.x * m_worldCells.y * m_worldCells.z; m_hvCells.resize( numCells ); // Allocate host memory to temporarily store the 3D texture data. uint * phCellIndices = (uint*)malloc( numCells * sizeof(uint) ); uint index = 0; for( size_t iHeight = 0; iHeight < m_worldCells.y; iHeight++ ) // height - texture z axis, world y axis { for( size_t iDepth = 0; iDepth < m_worldCells.z; iDepth++ ) // depth - texture y axis, world z axis { for( size_t iWidth = 0; iWidth < m_worldCells.x; iWidth++ ) // width - texture x axis, world x axis { // Make a bin_cell structure. 
bin_cell bc; //bc.iBinIndex = iBinIndex; bc.index = iWidth + (iDepth * m_worldCells.x) + (iHeight * m_worldCells.z * m_worldCells.x); // Set the offset value for the cell lookup texture. phCellIndices[index] = bc.index; // Set the minBounds of the cell. bc.minBound.x = iWidth * step.x - 0.5f * m_worldSize.x; bc.minBound.y = iHeight * step.y - 0.5f * m_worldSize.y; bc.minBound.z = iDepth * step.z - 0.5f * m_worldSize.z; // Set the position of the cell. bc.position.x = bc.minBound.x + 0.5f * step.x; bc.position.y = bc.minBound.y + 0.5f * step.y; bc.position.z = bc.minBound.z + 0.5f * step.z; // Set the maxBounds of the cell. bc.maxBound.x = bc.minBound.x + step.x; bc.maxBound.y = bc.minBound.y + step.y; bc.maxBound.z = bc.minBound.z + step.z; //m_hvCells.push_back( bc ); m_hvCells[index] = bc; index++; } } } // Transfer the bin_cell structures to the device memory. m_dvCells = m_hvCells; // Prepare the bin_cell index lookup texture. hipExtent const extent = make_hipExtent( m_worldCells.x, m_worldCells.y, m_worldCells.z ); hipChannelFormatDesc const desc = hipCreateChannelDesc< uint >(); hipPitchedPtr srcPtr = make_hipPitchedPtr( (void*)phCellIndices, extent.width * sizeof(uint), extent.width, extent.height ); // Allocate m_pdCellIndexArray. CUDA_SAFE_CALL( hipMalloc3DArray( &m_pdCellIndexArray, &desc, extent, hipArrayDefault ) ); // Copy data to 3D array. hipMemcpy3DParms copyParms = {0}; copyParms.srcPtr = srcPtr; copyParms.dstArray = m_pdCellIndexArray; copyParms.extent = extent; copyParms.kind = hipMemcpyHostToDevice; CUDA_SAFE_CALL( hipMemcpy3D( &copyParms ) ); // Copy the m_worldSize and m_worldCells values to constant memory. CUDA_SAFE_CALL( hipMemcpyToSymbol( "constWorldSizeV2", &m_worldSize, sizeof(float3) ) ); CUDA_SAFE_CALL( hipMemcpyToSymbol( "constWorldStepV2", &step, sizeof(float3) ) ); CUDA_SAFE_CALL( hipMemcpyToSymbol( "constWorldStepNormalizedV2", &stepNormalized, sizeof(float3) ) ); CUDA_SAFE_CALL( hipMemcpyToSymbol( "constWorldCellsV2", &m_worldCells, sizeof(uint3) ) ); // Free host memory. free( phCellIndices ); }
9e1f97df0a884a91a17be9ab29565a207e2ad69b.cu
#include "KNNBinDataV2.cuh" #include "CUDAGlobals.cuh" using namespace OpenSteer; extern "C" { // Bind the textures to the input cudaArray. __host__ void KNNBinningV2BindTexture( cudaArray * pCudaArray ); // Unbind the textures. __host__ void KNNBinningV2UnbindTexture( void ); // Use to precompute the neighbors of each cell once per decomposition. __global__ void KNNBinningV2ComputeCellNeighbors2D( bin_cell const* pdCells, // In: Cell data. uint * pdCellNeighbors, // Out: Array of computed cell neighbors. size_t const neighborsPerCell, // In: Number of neighbors per cell. int const radius, // In: Search radius. size_t const numCells // In: Number of cells. ); __global__ void KNNBinningV2ComputeCellNeighbors3D( bin_cell const* pdCells, // In: Cell data. uint * pdCellNeighbors, // Out: Array of computed cell neighbors. size_t const neighborsPerCell, // In: Number of neighbors per cell. uint const radius, // In: Search radius. size_t const numCells // In: Number of cells. ); } KNNBinDataV2::KNNBinDataV2( uint3 const& worldCells, float3 const& worldSize, uint const searchRadius ) : m_worldCells( worldCells ), m_worldSize( worldSize ), m_nSearchRadius( searchRadius ) { m_nCells = m_worldCells.x * m_worldCells.y * m_worldCells.z; // Create the cells. CreateCells(); // Compute the neighbors for the cells. ComputeCellNeighbors( m_worldCells.y > 1 ); } void KNNBinDataV2::ComputeCellNeighbors( bool b3D ) { dim3 grid = gridDim(); dim3 block = blockDim(); m_nNeighborsPerCell = ipow( (m_nSearchRadius * 2 + 1), (b3D ? 3 : 2) ); size_t const shMemSize = sizeof(uint) * KNN_THREADSPERBLOCK * m_nNeighborsPerCell; // Allocate enough device memory. m_dvCellNeighbors.resize( m_nCells * m_nNeighborsPerCell ); // Bind the texture. KNNBinningV2BindTexture( pdCellIndexArray() ); if( b3D ) { KNNBinningV2ComputeCellNeighbors3D<<< grid, block, shMemSize >>>( pdCells(), pdCellNeighbors(), m_nNeighborsPerCell, m_nSearchRadius, m_nCells ); } else { KNNBinningV2ComputeCellNeighbors2D<<< grid, block, shMemSize >>>( pdCells(), pdCellNeighbors(), m_nNeighborsPerCell, m_nSearchRadius, m_nCells ); } cutilCheckMsg( "KNNBinningComputeCellNeighbors failed." ); //CUDA_SAFE_CALL( cudaThreadSynchronize() ); // Unbind the texture. KNNBinningV2UnbindTexture(); } void KNNBinDataV2::CreateCells( void ) { float3 const step = make_float3( m_worldSize.x / m_worldCells.x, // width m_worldSize.y / m_worldCells.y, // height m_worldSize.z / m_worldCells.z ); // depth float3 const stepNormalized = make_float3( step.x / m_worldSize.x, step.y / m_worldSize.y, step.z / m_worldSize.z ); /* Texture addressing in CUDA operates as follows. z| | y/ | / | / | / | / |/_________x */ size_t const numCells = m_worldCells.x * m_worldCells.y * m_worldCells.z; m_hvCells.resize( numCells ); // Allocate host memory to temporarily store the 3D texture data. uint * phCellIndices = (uint*)malloc( numCells * sizeof(uint) ); uint index = 0; for( size_t iHeight = 0; iHeight < m_worldCells.y; iHeight++ ) // height - texture z axis, world y axis { for( size_t iDepth = 0; iDepth < m_worldCells.z; iDepth++ ) // depth - texture y axis, world z axis { for( size_t iWidth = 0; iWidth < m_worldCells.x; iWidth++ ) // width - texture x axis, world x axis { // Make a bin_cell structure. bin_cell bc; //bc.iBinIndex = iBinIndex; bc.index = iWidth + (iDepth * m_worldCells.x) + (iHeight * m_worldCells.z * m_worldCells.x); // Set the offset value for the cell lookup texture. phCellIndices[index] = bc.index; // Set the minBounds of the cell. 
bc.minBound.x = iWidth * step.x - 0.5f * m_worldSize.x; bc.minBound.y = iHeight * step.y - 0.5f * m_worldSize.y; bc.minBound.z = iDepth * step.z - 0.5f * m_worldSize.z; // Set the position of the cell. bc.position.x = bc.minBound.x + 0.5f * step.x; bc.position.y = bc.minBound.y + 0.5f * step.y; bc.position.z = bc.minBound.z + 0.5f * step.z; // Set the maxBounds of the cell. bc.maxBound.x = bc.minBound.x + step.x; bc.maxBound.y = bc.minBound.y + step.y; bc.maxBound.z = bc.minBound.z + step.z; //m_hvCells.push_back( bc ); m_hvCells[index] = bc; index++; } } } // Transfer the bin_cell structures to the device memory. m_dvCells = m_hvCells; // Prepare the bin_cell index lookup texture. cudaExtent const extent = make_cudaExtent( m_worldCells.x, m_worldCells.y, m_worldCells.z ); cudaChannelFormatDesc const desc = cudaCreateChannelDesc< uint >(); cudaPitchedPtr srcPtr = make_cudaPitchedPtr( (void*)phCellIndices, extent.width * sizeof(uint), extent.width, extent.height ); // Allocate m_pdCellIndexArray. CUDA_SAFE_CALL( cudaMalloc3DArray( &m_pdCellIndexArray, &desc, extent, cudaArrayDefault ) ); // Copy data to 3D array. cudaMemcpy3DParms copyParms = {0}; copyParms.srcPtr = srcPtr; copyParms.dstArray = m_pdCellIndexArray; copyParms.extent = extent; copyParms.kind = cudaMemcpyHostToDevice; CUDA_SAFE_CALL( cudaMemcpy3D( &copyParms ) ); // Copy the m_worldSize and m_worldCells values to constant memory. CUDA_SAFE_CALL( cudaMemcpyToSymbol( "constWorldSizeV2", &m_worldSize, sizeof(float3) ) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol( "constWorldStepV2", &step, sizeof(float3) ) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol( "constWorldStepNormalizedV2", &stepNormalized, sizeof(float3) ) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol( "constWorldCellsV2", &m_worldCells, sizeof(uint3) ) ); // Free host memory. free( phCellIndices ); }
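KNNBinDataV2 only declares KNNBinningV2BindTexture and KNNBinningV2UnbindTexture as extern helpers; their definitions live in another translation unit of the original project and are not shown here. The fragment below is a plausible reconstruction offered purely for illustration: the texture reference name is invented, and the channel setup is assumed to mirror the uint array filled by CreateCells(). It uses the legacy texture-reference API that the rest of this file is written against.

texture<uint, cudaTextureType3D, cudaReadModeElementType> texV2CellIndices;

extern "C" __host__ void KNNBinningV2BindTexture( cudaArray * pCudaArray )
{
	// Channel layout must match the uint indices written into the 3D array by CreateCells().
	cudaChannelFormatDesc const desc = cudaCreateChannelDesc< uint >();
	CUDA_SAFE_CALL( cudaBindTextureToArray( texV2CellIndices, pCudaArray, desc ) );
}

extern "C" __host__ void KNNBinningV2UnbindTexture( void )
{
	CUDA_SAFE_CALL( cudaUnbindTexture( texV2CellIndices ) );
}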
b1776d6c82cc4d02d40ee2df052ccc749e3f89c4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <multigrid.h> __global__ void cero(Malla m) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if(x<m.dim && y < m.dim) { m.v[x*m.dim+y]=0.0; } } __global__ void inicializa_f(Malla m) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; double coord_x=1.0*y/(m.dim-1); double coord_y=1.0*x/(m.dim-1); if(x==m.dim-1 || y==m.dim-1 || x==0 || y==0) m.v[x*m.dim+y]=0.0; if(x<m.dim-1 && y<m.dim-1 && x>0 && y>0) m.v[x*m.dim+y]=cos(coord_x*coord_y); } __global__ void inicializa(Malla m) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if(x==m.dim-1 || y==m.dim-1 || x==0 || y==0) m.v[x*m.dim+y]=0.0; if(x<m.dim-1 && y<m.dim-1 && x>0 && y>0) m.v[x*m.dim+y]=1; } __global__ void suavizado_n(Malla u, Malla f) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; double h2=pow(1.0/(u.dim-1),2); if(x<u.dim-1 && y < u.dim-1 && x>0 && y>0 && !((x+y)%2)) u.v[x*u.dim+y]=0.25*(f.v[x*u.dim+y]*h2+ u.v[(x-1)*u.dim+y]+ u.v[(x+1)*u.dim+y]+ u.v[x*u.dim+y-1]+ u.v[x*u.dim+y+1]); } __global__ void suavizado_r(Malla u, Malla f) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; double h2=pow(1.0/(u.dim-1),2); if(x<u.dim-1 && y < u.dim-1 && x>0 && y>0 && ((x+y)%2)) u.v[x*u.dim+y]=0.25*(f.v[x*u.dim+y]*h2+ u.v[(x-1)*u.dim+y]+ u.v[(x+1)*u.dim+y]+ u.v[x*u.dim+y-1]+ u.v[x*u.dim+y+1]); } __global__ void defecto(Malla u, Malla f, Malla d) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; double h2=pow(1.0/(u.dim-1),2); if(x==u.dim-1 || y==u.dim-1 || x==0 || y==0) d.v[x*u.dim+y]=0.0; if(x<u.dim-1 && y < u.dim-1 && x>0 && y>0) { d.v[x*u.dim+y]=f.v[x*u.dim+y]- (4*u.v[x*u.dim+y]-u.v[(x-1)*u.dim+y]-u.v[(x+1)*u.dim+y]-u.v[x*u.dim+(y-1)]-u.v[x*u.dim+(y+1)])/h2; } } __global__ void restringe(Malla u, Malla u_) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if(x<u_.dim-1 && y<u_.dim-1 && x>0 && y>0) { u_.v[x*u_.dim+y]=( 4*u.v[(2*x )*u.dim+2*y ]+ 2*u.v[(2*x-1)*u.dim+2*y ]+ 2*u.v[(2*x+1)*u.dim+2*y ]+ 2*u.v[(2*x )*u.dim+2*y-1]+ 2*u.v[(2*x )*u.dim+2*y+1]+ u.v[(2*x-1)*u.dim+2*y-1]+ u.v[(2*x-1)*u.dim+2*y+1]+ u.v[(2*x+1)*u.dim+2*y-1]+ u.v[(2*x+1)*u.dim+2*y+1])/16; } } __global__ void interpola(Malla u_, Malla u) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if(x<u_.dim && y< u_.dim ) { u.v[2*x*u.dim+2*y]=u_.v[x*u_.dim+y]; if(2*x+1<u.dim) u.v[(2*x+1)*u.dim+2*y]=(u_.v[x*u_.dim+y]+u_.v[(x+1)*u_.dim+y])/2; if(2*y+1<u.dim) u.v[2*x*u.dim+2*y+1]=(u_.v[x*u_.dim+y]+u_.v[x*u_.dim+y+1])/2; if(2*x+1<u.dim && 2*y+1<u.dim) u.v[(2*x+1)*u.dim+2*y+1]=(u_.v[x*u_.dim+y] +u_.v[(x+1)*u_.dim+y]+ u_.v[x*u_.dim+y+1]+u_.v[(x+1)*u_.dim+y+1])/4; } } __global__ void suma(Malla u, Malla v) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if(x<u.dim && y< u.dim) { u.v[x*u.dim+y]=u.v[x*u.dim+y]+v.v[x*u.dim+y]; } } __global__ void soluciona(Malla u, Malla f) { u.v[4]=f.v[4]/16; } __global__ void calcula_max(Malla m, double * max) { int x = blockIdx.x*blockDim.x + threadIdx.x; int j; max[x]=0.0; if(x<m.dim) { for(j=1;j<m.dim-1;j++) { max[x]=x; if(abs(m.v[x*m.dim+j])>max[x]) max[x]=x; } } } __global__ void calcula_max2(double * max) { int x = blockIdx.x*blockDim.x + threadIdx.x; if(x<257) { max[x]=x; } }
b1776d6c82cc4d02d40ee2df052ccc749e3f89c4.cu
#include <multigrid.h> __global__ void cero(Malla m) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if(x<m.dim && y < m.dim) { m.v[x*m.dim+y]=0.0; } } __global__ void inicializa_f(Malla m) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; double coord_x=1.0*y/(m.dim-1); double coord_y=1.0*x/(m.dim-1); if(x==m.dim-1 || y==m.dim-1 || x==0 || y==0) m.v[x*m.dim+y]=0.0; if(x<m.dim-1 && y<m.dim-1 && x>0 && y>0) m.v[x*m.dim+y]=cos(coord_x*coord_y); } __global__ void inicializa(Malla m) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if(x==m.dim-1 || y==m.dim-1 || x==0 || y==0) m.v[x*m.dim+y]=0.0; if(x<m.dim-1 && y<m.dim-1 && x>0 && y>0) m.v[x*m.dim+y]=1; } __global__ void suavizado_n(Malla u, Malla f) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; double h2=pow(1.0/(u.dim-1),2); if(x<u.dim-1 && y < u.dim-1 && x>0 && y>0 && !((x+y)%2)) u.v[x*u.dim+y]=0.25*(f.v[x*u.dim+y]*h2+ u.v[(x-1)*u.dim+y]+ u.v[(x+1)*u.dim+y]+ u.v[x*u.dim+y-1]+ u.v[x*u.dim+y+1]); } __global__ void suavizado_r(Malla u, Malla f) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; double h2=pow(1.0/(u.dim-1),2); if(x<u.dim-1 && y < u.dim-1 && x>0 && y>0 && ((x+y)%2)) u.v[x*u.dim+y]=0.25*(f.v[x*u.dim+y]*h2+ u.v[(x-1)*u.dim+y]+ u.v[(x+1)*u.dim+y]+ u.v[x*u.dim+y-1]+ u.v[x*u.dim+y+1]); } __global__ void defecto(Malla u, Malla f, Malla d) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; double h2=pow(1.0/(u.dim-1),2); if(x==u.dim-1 || y==u.dim-1 || x==0 || y==0) d.v[x*u.dim+y]=0.0; if(x<u.dim-1 && y < u.dim-1 && x>0 && y>0) { d.v[x*u.dim+y]=f.v[x*u.dim+y]- (4*u.v[x*u.dim+y]-u.v[(x-1)*u.dim+y]-u.v[(x+1)*u.dim+y]-u.v[x*u.dim+(y-1)]-u.v[x*u.dim+(y+1)])/h2; } } __global__ void restringe(Malla u, Malla u_) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if(x<u_.dim-1 && y<u_.dim-1 && x>0 && y>0) { u_.v[x*u_.dim+y]=( 4*u.v[(2*x )*u.dim+2*y ]+ 2*u.v[(2*x-1)*u.dim+2*y ]+ 2*u.v[(2*x+1)*u.dim+2*y ]+ 2*u.v[(2*x )*u.dim+2*y-1]+ 2*u.v[(2*x )*u.dim+2*y+1]+ u.v[(2*x-1)*u.dim+2*y-1]+ u.v[(2*x-1)*u.dim+2*y+1]+ u.v[(2*x+1)*u.dim+2*y-1]+ u.v[(2*x+1)*u.dim+2*y+1])/16; } } __global__ void interpola(Malla u_, Malla u) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if(x<u_.dim && y< u_.dim ) { u.v[2*x*u.dim+2*y]=u_.v[x*u_.dim+y]; if(2*x+1<u.dim) u.v[(2*x+1)*u.dim+2*y]=(u_.v[x*u_.dim+y]+u_.v[(x+1)*u_.dim+y])/2; if(2*y+1<u.dim) u.v[2*x*u.dim+2*y+1]=(u_.v[x*u_.dim+y]+u_.v[x*u_.dim+y+1])/2; if(2*x+1<u.dim && 2*y+1<u.dim) u.v[(2*x+1)*u.dim+2*y+1]=(u_.v[x*u_.dim+y] +u_.v[(x+1)*u_.dim+y]+ u_.v[x*u_.dim+y+1]+u_.v[(x+1)*u_.dim+y+1])/4; } } __global__ void suma(Malla u, Malla v) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if(x<u.dim && y< u.dim) { u.v[x*u.dim+y]=u.v[x*u.dim+y]+v.v[x*u.dim+y]; } } __global__ void soluciona(Malla u, Malla f) { u.v[4]=f.v[4]/16; } __global__ void calcula_max(Malla m, double * max) { int x = blockIdx.x*blockDim.x + threadIdx.x; int j; max[x]=0.0; if(x<m.dim) { for(j=1;j<m.dim-1;j++) { max[x]=x; if(abs(m.v[x*m.dim+j])>max[x]) max[x]=x; } } } __global__ void calcula_max2(double * max) { int x = blockIdx.x*blockDim.x + threadIdx.x; if(x<257) { max[x]=x; } }
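The multigrid file above defines only device kernels; neither half of the pair ships a host driver. The sketch below is not part of the original sources: it shows one way the kernels could be composed into a single two-grid correction step. It assumes it is compiled in the same translation unit as the kernels, so Malla (with its dim and v members) and the kernel symbols are visible, and that the fine arrays u, f, d and the coarse arrays d_c, e_c have compatible dimensions for restringe/interpola. The function name, block size, and sweep counts are illustrative choices.

static dim3 gridFor( int dim, dim3 block )
{
	// One thread per grid point, rounded up to whole blocks.
	return dim3( (dim + block.x - 1) / block.x, (dim + block.y - 1) / block.y );
}

void two_grid_step( Malla u, Malla f, Malla d, Malla d_c, Malla e_c, int sweeps )
{
	dim3 block( 16, 16 );
	dim3 fine   = gridFor( u.dim,   block );    // launch shape on the fine grid
	dim3 coarse = gridFor( d_c.dim, block );    // launch shape on the coarse grid

	for( int s = 0; s < sweeps; s++ ) {         // red-black Gauss-Seidel pre-smoothing
		suavizado_r<<< fine, block >>>( u, f );
		suavizado_n<<< fine, block >>>( u, f );
	}
	defecto  <<< fine,   block >>>( u, f, d );  // fine-grid residual
	restringe<<< coarse, block >>>( d, d_c );   // restrict the residual
	cero     <<< coarse, block >>>( e_c );      // start the coarse correction from zero
	for( int s = 0; s < 4 * sweeps; s++ ) {     // cheap coarse "solve" by extra smoothing
		suavizado_r<<< coarse, block >>>( e_c, d_c );
		suavizado_n<<< coarse, block >>>( e_c, d_c );
	}
	interpola<<< coarse, block >>>( e_c, d );   // prolong the correction, reusing d as scratch
	suma     <<< fine,   block >>>( u, d );     // apply the correction
	for( int s = 0; s < sweeps; s++ ) {         // post-smoothing
		suavizado_r<<< fine, block >>>( u, f );
		suavizado_n<<< fine, block >>>( u, f );
	}
	cudaDeviceSynchronize();
}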
17e96236c534ec0e48a4d1b9cd65c64b98675be3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // svsvg.cu --- Part of the project OPLib 1.0, a high performance pricing library // based on operator methods, higher level BLAS and multicore architectures // Author: 2009 Claudio Albanese // Maintainer: Claudio Albanese <[email protected]> // Created: April-July 2009 // Version: 1.0.0 // Credits: The CUDA code for SGEMM4, SGEMV4 and SSQMM were inspired by // Vasily Volkov's implementation of SGEMM // We use several variations of the multi-threaded Mersenne Twister algorithm of // period 2203 due to Makoto Matsumoto. // The Monte Carlo routine in SMC includes code by Victor Podlozhnyuk // included in the CUDA SDK. // CPU-side BLAS and random number generators link to primitives in the // Intel Math Kernel Libraries. // This program is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program; see the file COPYING. If not, write to // the Free Software Foundation, Inc., 59 Temple Place - Suite 330, // Boston, MA 02111-1307, USA. #ifdef LINUX #define __declspec(x) #define __stdcall #endif __global__ void global_sgsvg(float* gen_yy_i, int ni, int nx, int nr, float *invm, float * xval_y, float *SDrift_yi, float *SVol_yi, float *VolDrift_yi, float *VolVol_yi, float *Jumpsz_minus_yi, float *Jumpsz_plus_yi){ const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int d = nx * nr; const int i = tid / d; const int y0 = tid - i * d; const int r0 = y0 / nx; const int x0 = y0 - r0 * nx; int x1, r1, y1; float xi; float rhs[4]; bool condition; rhs[0] = 0; condition = (x0 > 0 && x0 < nx - 1); rhs[1] = SVol_yi[y0 + d * i]; rhs[1] *= (condition * rhs[1]); rhs[2] = VolDrift_yi[y0 + d * i]; condition = (r0 > 0 && r0 < nr - 1); rhs[3] = VolVol_yi[y0 + d * i]; rhs[3] *= condition * rhs[3]; x1 = x0 - 1; r1 = r0; y1 = x1 + nx * r1; if (x0 > 0) { xi = 0; for (int i1 = 0; i1 < 4; i1++) xi += invm[16 * y0 + 0 + 4 * i1] * rhs[i1]; gen_yy_i[i * d * d + (y0 + d * y1)] = xi; } x1 = x0 + 1; r1 = r0; y1 = x1 + nx * r1; if (x0 < nx - 1) { xi = 0; for (int i1 = 0; i1 < 4; i1++) xi += invm[16 * y0 + 1 + 4 * i1] * rhs[i1]; gen_yy_i[i * d * d + (y0 + d * y1)] = xi; } x1 = x0; r1 = r0 - 1; y1 = x1 + nx * r1; if (r0 > 0) { xi = 0; for (int i1 = 0; i1 < 4; i1++) xi += invm[16 * y0 + 2 + 4 * i1] * rhs[i1]; gen_yy_i[i * d * d + (y0 + d * y1)] = xi; } x1 = x0; r1 = r0 + 1; y1 = x1 + nx * r1; if (r0 < nr - 1) { xi = 0; for (int i1 = 0; i1 < 4; i1++) xi += invm[16 * y0 + 3 + 4 * i1] * rhs[i1]; gen_yy_i[i * d * d + (y0 + d * y1)] = xi; } //add jumps float jumpsz_minus = Jumpsz_minus_yi[i * d + y0] * xval_y[y0]; float jumpsz_plus = Jumpsz_minus_yi[i * d + y0] * xval_y[y0]; for (int r1 = r0 + 1; r1 < nr; r1++) { float prob = 0; float sum0 = 0; for (int x1 = 0; x1 < nx; x1++) { int y1 = x1 + nx * r1; prob += gen_yy_i[i * d * d + (y0 + d * y1)]; } //prob is the total probability for a volatility transition if (prob > 0) { for (int x1 = 0; x1 < x0 - 1; x1++) { int y1 = x1 + nx * r1; if (jumpsz_minus > 0) { sum0 += exp((xval_y[y1] - 
xval_y[y0]) / jumpsz_minus); } } for (int x1 = x0 + 1; x1 < nx; x1++) { int y1 = x1 + nx * r1; if (jumpsz_plus > 0) { sum0 += exp(-(xval_y[y1] - xval_y[y0]) / jumpsz_plus); } } if (sum0 > 0) { float ratio = prob / sum0; for (int x1 = 0; x1 < x0 - 1; x1++) { int y1 = x1 + nx * r1; gen_yy_i[i * d * d + (y0 + d * y1)] = ratio * exp((xval_y[y1] - xval_y[y0]) / jumpsz_minus); } for (int x1 = x0 + 1; x1 < nx; x1++) { int y1 = x1 + nx * r1; gen_yy_i[i * d * d + (y0 + d * y1)] = ratio * exp(-(xval_y[y1] - xval_y[y0]) / jumpsz_plus); } } //end if(sum0>0) }//end if (prob0>0) }//end for (int r1 = r0 + 1; r1 < grid.nr; r1++) //fix up sum rules for drift and probability conservation float sum0 = 0; float drift = 0; for (int y1 = 0; y1 < d; y1++) { if (y0 != y1) { if (gen_yy_i[i * d * d + (y0 + d * y1)] < 0) { gen_yy_i[i * d * d + (y0 + d * y1)] = 0; } else { drift += gen_yy_i[i * d * d + (y0 + d * y1)] * (xval_y[y1] - xval_y[y0]); sum0 += gen_yy_i[i * d * d + (y0 + d * y1)]; } } } float drift0 = SDrift_yi[y0 + d * i]; if (drift > drift0) { if (x0 > 0) { int y1 = x0 - 1 + nx * r0; float ratio = (drift - drift0) / (xval_y[y1] - xval_y[y0]); gen_yy_i[i * d * d + (y0 + d * y1)] += ratio; sum0 += ratio; } } else { if (x0 < nx - 1) { int y1 = x0 + 1 + nx * r0; float ratio = (drift0 - drift) / (xval_y[y1] - xval_y[y0]); gen_yy_i[i * d * d + (y0 + d * y1)] += ratio; sum0 += ratio; } } gen_yy_i[i * d * d + (y0 + d * y0)] = -sum0; }// end function extern "C" __declspec( dllexport ) void opcuda_sgsvg(unsigned gen_yyi_ptr, int ni, int nx, int nr, unsigned invmat_ptr, unsigned parptr){ int d = nx*nr; if(d%32!=0) return; //assume d is a multiple of 32 float* gen_yyi = (float *) gen_yyi_ptr; float* invmat = (float *)invmat_ptr; float * xval_y = (float *) parptr; float* SDrift_yi = (float *) parptr + d; float* SVol_yi = (float *) parptr + d + ni * d; float* VolDrift_yi = (float *) parptr + d + 2 * ni * d; float* VolVol_yi = (float *) parptr + d + 3 * ni * d; float* Jumpsz_minus_yi = (float *) parptr + d + 4 * ni * d; float* Jumpsz_plus_yi = (float *) parptr + d + 5 * ni * d; hipLaunchKernelGGL(( global_sgsvg), dim3(ni * d / 32), dim3(32) , 0, 0, gen_yyi, ni, nx, nr, invmat, xval_y, SDrift_yi, SVol_yi, VolDrift_yi, VolVol_yi, Jumpsz_minus_yi, Jumpsz_plus_yi); } extern __shared__ float diag[]; __global__ void global_sgsvep(float* gen_yyq, int nq, int d, float *DeltaT_q, int *niter_q){ const int q = blockIdx.x; const int y0 = threadIdx.x/d; const int y1 = threadIdx.x%d; float DeltaT = DeltaT_q[q]; float *dt = diag + nq; if(y0==y1) { diag[y0] = gen_yyq[y0 + d * y0 + d * d * q]; } __syncthreads(); if(y0==y1 && y0==0) { float maxdiag = 0; for(int y=0; y<d; y++) if(-diag[y0]>maxdiag) maxdiag = -diag[y0]; dt[0] = 0.5 / maxdiag; if (dt[0] > 1.0 / 365.0) dt[0] = 1.0 / 365.0; niter_q[q] = (int) ceil(log(DeltaT / dt[0]) / log(2.0)); } __syncthreads(); gen_yyq[q * d * d + (y0 + d * y1)] *= dt[0]; __syncthreads(); if(y0==y1) { gen_yyq[y0 + d * y0 + d * q * q] += 1; } } extern "C" __declspec( dllexport ) void opcuda_sgsvep (unsigned genptr, int nq, int d, unsigned Deltaqptr, unsigned niterqptr){ float* gen_yyq = (float *) genptr; float* DeltaT_q = (float *) Deltaqptr; int *niter_q = (int *) niterqptr; hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, 0); int nprocessors = deviceProp.multiProcessorCount; int maxthreads = deviceProp.maxThreadsPerBlock; int nthreads_per_block = maxthreads; int nblocks = 2 * nprocessors; int number_of_blocks = max(nblocks, (int) ceil((float)nq * d / nthreads_per_block)); 
	hipLaunchKernelGGL(( global_sgsvep), dim3(nthreads_per_block), dim3(number_of_blocks) , 0, 0, gen_yyq, nq, d, DeltaT_q, niter_q);
}
17e96236c534ec0e48a4d1b9cd65c64b98675be3.cu
// svsvg.cu --- Part of the project OPLib 1.0, a high performance pricing library // based on operator methods, higher level BLAS and multicore architectures // Author: 2009 Claudio Albanese // Maintainer: Claudio Albanese <[email protected]> // Created: April-July 2009 // Version: 1.0.0 // Credits: The CUDA code for SGEMM4, SGEMV4 and SSQMM were inspired by // Vasily Volkov's implementation of SGEMM // We use several variations of the multi-threaded Mersenne Twister algorithm of // period 2203 due to Makoto Matsumoto. // The Monte Carlo routine in SMC includes code by Victor Podlozhnyuk // included in the CUDA SDK. // CPU-side BLAS and random number generators link to primitives in the // Intel Math Kernel Libraries. // This program is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program; see the file COPYING. If not, write to // the Free Software Foundation, Inc., 59 Temple Place - Suite 330, // Boston, MA 02111-1307, USA. #ifdef LINUX #define __declspec(x) #define __stdcall #endif __global__ void global_sgsvg(float* gen_yy_i, int ni, int nx, int nr, float *invm, float * xval_y, float *SDrift_yi, float *SVol_yi, float *VolDrift_yi, float *VolVol_yi, float *Jumpsz_minus_yi, float *Jumpsz_plus_yi){ const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int d = nx * nr; const int i = tid / d; const int y0 = tid - i * d; const int r0 = y0 / nx; const int x0 = y0 - r0 * nx; int x1, r1, y1; float xi; float rhs[4]; bool condition; rhs[0] = 0; condition = (x0 > 0 && x0 < nx - 1); rhs[1] = SVol_yi[y0 + d * i]; rhs[1] *= (condition * rhs[1]); rhs[2] = VolDrift_yi[y0 + d * i]; condition = (r0 > 0 && r0 < nr - 1); rhs[3] = VolVol_yi[y0 + d * i]; rhs[3] *= condition * rhs[3]; x1 = x0 - 1; r1 = r0; y1 = x1 + nx * r1; if (x0 > 0) { xi = 0; for (int i1 = 0; i1 < 4; i1++) xi += invm[16 * y0 + 0 + 4 * i1] * rhs[i1]; gen_yy_i[i * d * d + (y0 + d * y1)] = xi; } x1 = x0 + 1; r1 = r0; y1 = x1 + nx * r1; if (x0 < nx - 1) { xi = 0; for (int i1 = 0; i1 < 4; i1++) xi += invm[16 * y0 + 1 + 4 * i1] * rhs[i1]; gen_yy_i[i * d * d + (y0 + d * y1)] = xi; } x1 = x0; r1 = r0 - 1; y1 = x1 + nx * r1; if (r0 > 0) { xi = 0; for (int i1 = 0; i1 < 4; i1++) xi += invm[16 * y0 + 2 + 4 * i1] * rhs[i1]; gen_yy_i[i * d * d + (y0 + d * y1)] = xi; } x1 = x0; r1 = r0 + 1; y1 = x1 + nx * r1; if (r0 < nr - 1) { xi = 0; for (int i1 = 0; i1 < 4; i1++) xi += invm[16 * y0 + 3 + 4 * i1] * rhs[i1]; gen_yy_i[i * d * d + (y0 + d * y1)] = xi; } //add jumps float jumpsz_minus = Jumpsz_minus_yi[i * d + y0] * xval_y[y0]; float jumpsz_plus = Jumpsz_minus_yi[i * d + y0] * xval_y[y0]; for (int r1 = r0 + 1; r1 < nr; r1++) { float prob = 0; float sum0 = 0; for (int x1 = 0; x1 < nx; x1++) { int y1 = x1 + nx * r1; prob += gen_yy_i[i * d * d + (y0 + d * y1)]; } //prob is the total probability for a volatility transition if (prob > 0) { for (int x1 = 0; x1 < x0 - 1; x1++) { int y1 = x1 + nx * r1; if (jumpsz_minus > 0) { sum0 += exp((xval_y[y1] - xval_y[y0]) / jumpsz_minus); } } for (int x1 = x0 + 1; x1 < nx; x1++) { int y1 = x1 + nx * 
r1; if (jumpsz_plus > 0) { sum0 += exp(-(xval_y[y1] - xval_y[y0]) / jumpsz_plus); } } if (sum0 > 0) { float ratio = prob / sum0; for (int x1 = 0; x1 < x0 - 1; x1++) { int y1 = x1 + nx * r1; gen_yy_i[i * d * d + (y0 + d * y1)] = ratio * exp((xval_y[y1] - xval_y[y0]) / jumpsz_minus); } for (int x1 = x0 + 1; x1 < nx; x1++) { int y1 = x1 + nx * r1; gen_yy_i[i * d * d + (y0 + d * y1)] = ratio * exp(-(xval_y[y1] - xval_y[y0]) / jumpsz_plus); } } //end if(sum0>0) }//end if (prob0>0) }//end for (int r1 = r0 + 1; r1 < grid.nr; r1++) //fix up sum rules for drift and probability conservation float sum0 = 0; float drift = 0; for (int y1 = 0; y1 < d; y1++) { if (y0 != y1) { if (gen_yy_i[i * d * d + (y0 + d * y1)] < 0) { gen_yy_i[i * d * d + (y0 + d * y1)] = 0; } else { drift += gen_yy_i[i * d * d + (y0 + d * y1)] * (xval_y[y1] - xval_y[y0]); sum0 += gen_yy_i[i * d * d + (y0 + d * y1)]; } } } float drift0 = SDrift_yi[y0 + d * i]; if (drift > drift0) { if (x0 > 0) { int y1 = x0 - 1 + nx * r0; float ratio = (drift - drift0) / (xval_y[y1] - xval_y[y0]); gen_yy_i[i * d * d + (y0 + d * y1)] += ratio; sum0 += ratio; } } else { if (x0 < nx - 1) { int y1 = x0 + 1 + nx * r0; float ratio = (drift0 - drift) / (xval_y[y1] - xval_y[y0]); gen_yy_i[i * d * d + (y0 + d * y1)] += ratio; sum0 += ratio; } } gen_yy_i[i * d * d + (y0 + d * y0)] = -sum0; }// end function extern "C" __declspec( dllexport ) void opcuda_sgsvg(unsigned gen_yyi_ptr, int ni, int nx, int nr, unsigned invmat_ptr, unsigned parptr){ int d = nx*nr; if(d%32!=0) return; //assume d is a multiple of 32 float* gen_yyi = (float *) gen_yyi_ptr; float* invmat = (float *)invmat_ptr; float * xval_y = (float *) parptr; float* SDrift_yi = (float *) parptr + d; float* SVol_yi = (float *) parptr + d + ni * d; float* VolDrift_yi = (float *) parptr + d + 2 * ni * d; float* VolVol_yi = (float *) parptr + d + 3 * ni * d; float* Jumpsz_minus_yi = (float *) parptr + d + 4 * ni * d; float* Jumpsz_plus_yi = (float *) parptr + d + 5 * ni * d; global_sgsvg<<< ni * d / 32, 32 >>> (gen_yyi, ni, nx, nr, invmat, xval_y, SDrift_yi, SVol_yi, VolDrift_yi, VolVol_yi, Jumpsz_minus_yi, Jumpsz_plus_yi); } extern __shared__ float diag[]; __global__ void global_sgsvep(float* gen_yyq, int nq, int d, float *DeltaT_q, int *niter_q){ const int q = blockIdx.x; const int y0 = threadIdx.x/d; const int y1 = threadIdx.x%d; float DeltaT = DeltaT_q[q]; float *dt = diag + nq; if(y0==y1) { diag[y0] = gen_yyq[y0 + d * y0 + d * d * q]; } __syncthreads(); if(y0==y1 && y0==0) { float maxdiag = 0; for(int y=0; y<d; y++) if(-diag[y0]>maxdiag) maxdiag = -diag[y0]; dt[0] = 0.5 / maxdiag; if (dt[0] > 1.0 / 365.0) dt[0] = 1.0 / 365.0; niter_q[q] = (int) ceil(log(DeltaT / dt[0]) / log(2.0)); } __syncthreads(); gen_yyq[q * d * d + (y0 + d * y1)] *= dt[0]; __syncthreads(); if(y0==y1) { gen_yyq[y0 + d * y0 + d * q * q] += 1; } } extern "C" __declspec( dllexport ) void opcuda_sgsvep (unsigned genptr, int nq, int d, unsigned Deltaqptr, unsigned niterqptr){ float* gen_yyq = (float *) genptr; float* DeltaT_q = (float *) Deltaqptr; int *niter_q = (int *) niterqptr; cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, 0); int nprocessors = deviceProp.multiProcessorCount; int maxthreads = deviceProp.maxThreadsPerBlock; int nthreads_per_block = maxthreads; int nblocks = 2 * nprocessors; int number_of_blocks = max(nblocks, (int) ceil((float)nq * d / nthreads_per_block)); global_sgsvep<<< nthreads_per_block, number_of_blocks >>> (gen_yyq, nq, d, DeltaT_q, niter_q); }
95376edc13906b498f67382e68febe43e2b326db.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <fstream> #include <stdio.h> #include <vector> #include <algorithm> #include <iterator> #include <utility> #include <math.h> #include <omp.h> #include <hip/hip_runtime.h> #include "util.h" #include "kernel.h" #include <bits/stdc++.h> using namespace std; long n_rows, n_cols, nnz; int tile_sizeX, tile_sizeY, k, actv_row_size; int BLOCKSIZE=1024; inline hipError_t checkCuda(hipError_t result, int s){ if (result != hipSuccess) { fprintf(stderr, "CUDA Runtime Error in line : %s - %d\n", hipGetErrorString(result), s); assert(result == hipSuccess); } return result; } void sddmm_GPU(int * d_row_ptr, int * d_row_ind, int *d_col_ind, float * d_val_ind, float * d_W, float *d_H, int *d_tiled_ind, int *d_lastIdx, int *lastIdx_tile, int *d_lastIdx_block_tile , int *d_active_row, int * count_actv_row, int &max_active_block, int *d_no_block_tile, long new_nnz, int &max_active_row, float *d_p){ int n_tile = n_cols/tile_sizeX + 1; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipStream_t stream[n_tile]; for (int i = 0; i < n_tile; i++){ hipStreamCreate(&(stream[i])); } float mili =0, copyTime = 0 ; dim3 block(BLOCKSIZE,1,1), grid(1,1,1); //grid.x = (new_nnz + BLOCKSIZE - 1) / BLOCKSIZE; grid.x = n_cols/tile_sizeX+1; // grid.x = (32 * n_rows + BLOCKSIZE - 1) / BLOCKSIZE; checkCuda(hipEventRecord(start), __LINE__); if(tile_sizeX == 96) hipLaunchKernelGGL(( comp_kernel_COO), dim3(grid), dim3(block), 0, stream[0], d_row_ind, d_col_ind, d_val_ind, d_W, d_H, new_nnz, n_rows, n_cols, k, d_active_row, d_lastIdx, d_lastIdx_block_tile, d_no_block_tile, max_active_block, tile_sizeX, max_active_row, d_p); else if(tile_sizeX == 192) //change orientation of W and H hipLaunchKernelGGL(( comp_kernel_COO_kslc16_adv), dim3(grid), dim3(block), 0, stream[0], d_row_ind, d_col_ind, d_val_ind, d_W, d_H, new_nnz, n_rows, n_cols, k, d_active_row, d_lastIdx, d_lastIdx_block_tile, d_no_block_tile, max_active_block, tile_sizeX, max_active_row, d_p); // comp_kernel_COO_kslc16<<<grid, block, 0, stream[0]>>>(d_row_ind, d_col_ind, d_val_ind, d_W, d_H, // new_nnz, n_rows, n_cols, k, d_active_row, d_lastIdx, d_lastIdx_block_tile, d_no_block_tile, // max_active_block, tile_sizeX, max_active_row, d_p); else if(tile_sizeX == 384) //change orientation hipLaunchKernelGGL(( comp_kernel_COO_kslc8), dim3(grid), dim3(block), 0, stream[0], d_row_ind, d_col_ind, d_val_ind, d_W, d_H, new_nnz, n_rows, n_cols, k, d_active_row, d_lastIdx, d_lastIdx_block_tile, d_no_block_tile, max_active_block, tile_sizeX, max_active_row, d_p); else if(tile_sizeX == 768) //change orientation hipLaunchKernelGGL(( comp_kernel_COO_kslc4), dim3(grid), dim3(block), 0, stream[0], d_row_ind, d_col_ind, d_val_ind, d_W, d_H, new_nnz, n_rows, n_cols, k, d_active_row, d_lastIdx, d_lastIdx_block_tile, d_no_block_tile, max_active_block, tile_sizeX, max_active_row, d_p); else if(tile_sizeX >= 1024) //change orientation hipLaunchKernelGGL(( comp_kernel_COO_DGEMM), dim3(grid), dim3(block), 0, stream[0], d_row_ind, d_col_ind, d_val_ind, d_W, d_H, new_nnz, n_rows, n_cols, k, d_active_row, d_lastIdx, d_lastIdx_block_tile, d_no_block_tile, max_active_block, tile_sizeX, max_active_row, d_p); checkCuda(hipEventRecord(stop), __LINE__); hipEventSynchronize(stop); //hipDeviceSynchronize(); checkCuda(hipEventElapsedTime(&mili, start, stop), __LINE__); hipDeviceSynchronize(); cout << "GPU time " << mili << "ms"<< endl; } void sddmm_CPU_CSR(int * row_ptr, int *col_ind, float * val_ind, 
float * W, float *H, float * p_ind){ // reduction(+:rmse) long tot =0 ; #pragma omp parallel for reduction(+:tot) for (int r = 0; r < n_rows; ++r){ tot += row_ptr[r+1] - row_ptr[r]; float sm =0 ; for (int ind = row_ptr[r]; ind < row_ptr[r+1]; ++ind){ int row = r; int col = col_ind[ind]; int nnz = row_ptr[r+1]-row_ptr[r]; float val = val_ind[ind]; sm=0; for (int t = 0; t < k; ++t){ sm += W[row * k + t] * H[col * k + t]; // cout <<W[row * k + t] <<" "<<H[col * k + t]<< endl; } p_ind[ind] = sm * val_ind[ind]; // cout << "ind " << row<<" "<<col << ":: " <<" "<< p_ind[ind] << " = " << sm <<" * "<< val_ind[ind]<< endl; } } cout << "CPU tot " << tot << endl; for (int r = 500000; r < 500005; ++r) // for (int ind = row_ptr[r]; ind < row_ptr[r+1]; ++ind) cout << "row " << r << " " <<" "<< p_ind[r]<< endl; } void sddmm_CPU_COO(int * row_ind, int *col_ind, float * val_ind, float * W, float *H, float * p_ind){ // reduction(+:rmse) double start_time = omp_get_wtime(); omp_set_dynamic(0); omp_set_num_threads(28); #pragma omp parallel for //reduction(+:tot) for (int ind = 0; ind < nnz; ind++){ float sm =0 ; int row = row_ind[ind]; int col = col_ind[ind]; for (int t = 0; t < k; ++t) sm += W[row * k + t] * H[col * k + t]; p_ind[ind] = sm * val_ind[ind]; // cout << "ind " << row<<" "<<col << ":: " <<" "<< p_ind[ind] << " = " << sm <<" * "<< val_ind[ind]<< endl; // } } double CPU_time = omp_get_wtime() - start_time; //correctness check printf("\nomp time CPU : %.4f \n\n", CPU_time*1000); } void init(int *rows, int *cols, float* vals){ int n_bin=10; int *count = new int[n_bin]; int *row_ptr = new int[n_rows+1]; float *p_ind = new float[nnz]; float *W = new float[n_rows*k]; float *W_t = new float[n_rows*k]; float *H = new float[n_cols*k]; float *H_t = new float[n_cols*k]; int n_tile_c = n_cols/tile_sizeX + 1; int n_tile_r = n_rows/tile_sizeY + 1; int max_active_block = (n_rows/actv_row_size+1); int *count_actv_row = new int[n_tile_c]; int *no_block_tile = new int[n_tile_c]; int *lastIdx_tile = new int[n_tile_c+1]; int *lastIdx_block_tile = new int[(n_tile_c+1) * (n_rows/actv_row_size+1)]; float *d_val, *d_W, *d_H, *d_W_t, *d_p; int *d_row_ptr, *d_col_ind, *d_row_ind, *d_tiled_ind, *d_lastIdx, *d_active_row, *d_lastIdx_block_tile, *d_no_block_tile; int n_tileX = n_cols/tile_sizeX+1; int n_tileY = n_rows/tile_sizeY+1; long new_nnz =0 ; initial(W, n_rows, k); initial(H, n_cols, k); make_HTasH(H, H_t, n_cols, k); make_HTasH(W, W_t, n_rows, k); cout << n_cols <<" "<<n_tile_c << endl; int *new_rows = new int[nnz ]; int *new_cols = new int[nnz ]; float *new_vals = new float[nnz]; int *tiled_ind = new int [nnz ]; int *active_row = new int[n_tileX * n_rows]; // int *new_rows = new int[nnz]; // int *new_cols = new int[nnz]; // float *new_vals = new float[nnz]; //converting col sorted matrix to row sorted //unsorted_make_CSR(rows, cols, vals, nnz, n_rows, n_cols, row_ptr); //assuming sorted make_CSR(rows, cols, vals, nnz, n_rows, row_ptr); //comp_bin(n_bin, count, n_rows, row_ptr, nnz); int max_active_row=0; max_active_row = rewrite_matrix_1D(row_ptr, rows, cols, vals, new_rows, new_cols, new_vals, nnz, n_rows, n_cols, tile_sizeX, tiled_ind, lastIdx_tile, active_row, lastIdx_block_tile, count_actv_row, actv_row_size, new_nnz, no_block_tile, actv_row_size); // write_mat(row_ptr, rows, cols, vals, new_rows, new_cols, new_vals, nnz, n_rows, n_cols, // tile_sizeX, tile_sizeY, tiled_ind, lastIdx_tile, BLOCKSIZE, new_nnz); // rewrite_col_sorted_matrix(row_ptr, rows, cols, vals, new_rows, new_cols, new_vals, nnz, n_rows, n_cols, 
// tile_sizeX, tiled_ind, lastIdx_tile, BLOCKSIZE, new_nnz); double t0 = seconds(); // sddmm_CPU_CSR(row_ptr, cols, vals, W, H, p_ind); sddmm_CPU_COO(rows, cols, vals, W, H, p_ind); //***********Starting GPU**************** checkCuda(hipMalloc((void**)&d_W, k*n_rows*sizeof(float)),0); checkCuda(hipMalloc((void**)&d_H, k*n_cols*sizeof(float)),1); // checkCuda(hipMalloc((void**)&d_row_ptr, (n_rows+1)*sizeof(int)),2); checkCuda(hipMalloc((void**)&d_row_ind, new_nnz*sizeof(int)),4); if(actv_row_size > 256) checkCuda(hipMalloc((void**)&d_col_ind, new_nnz*sizeof(int)),4); checkCuda(hipMalloc((void**)&d_val, new_nnz*sizeof(float)),4); checkCuda(hipMalloc((void**)&d_p, new_nnz*sizeof(float)),4); checkCuda(hipMalloc((void**)&d_lastIdx, (n_tile_c+1)*sizeof(float)),4); checkCuda(hipMalloc((void**)&d_no_block_tile, (n_tile_c)*sizeof(float)),4); checkCuda(hipMalloc((void**)&d_active_row, n_tileX*max_active_row*sizeof(int)),4); checkCuda(hipMemcpy(d_row_ind, &(new_rows[0]), new_nnz*sizeof(int), hipMemcpyHostToDevice),4); if(actv_row_size > 256) checkCuda(hipMemcpy(d_col_ind, &(new_cols[0]), new_nnz*sizeof(int), hipMemcpyHostToDevice),4); checkCuda(hipMemcpy(d_val, &(new_vals[0]), new_nnz*sizeof(float), hipMemcpyHostToDevice),4); checkCuda(hipMemcpy(d_lastIdx, &(lastIdx_tile[0]), (n_tile_c+1)*sizeof(int), hipMemcpyHostToDevice),4); checkCuda(hipMalloc((void**)&d_lastIdx_block_tile, n_tileX*max_active_block*sizeof(int)),4); checkCuda(hipMemcpy(d_no_block_tile, &(no_block_tile[0]), (n_tile_c)*sizeof(int), hipMemcpyHostToDevice),4); hipMemset(d_p, 0, new_nnz*sizeof(float)); for (int i = 0; i < n_tileX; ++i) checkCuda(hipMemcpy(d_lastIdx_block_tile+i*max_active_block, &(lastIdx_block_tile[i*max_active_block]), max_active_block*sizeof(int), hipMemcpyHostToDevice),4); int sum =0 ; // for (int i = 0; i < n_tileX; ++i){ // checkCuda(hipMemcpy(d_active_row+sum, &(active_row[i*n_rows]), count_actv_row[i]*sizeof(int), hipMemcpyHostToDevice),4); // sum += count_actv_row[i]; // } for (int i = 0; i < n_tileX; ++i){ checkCuda(hipMemcpy(d_active_row+sum, &(active_row[i*n_rows]), max_active_row*sizeof(int), hipMemcpyHostToDevice),4); sum += max_active_row; } // checkCuda(hipMemcpy(d_tiled_ind, &(tiled_ind[0]), nnz*sizeof(int), hipMemcpyHostToDevice),4);; //hipMemcpy(d_W, &(W[0]), n_rows * k *sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_W, &(W_t[0]), n_rows * k *sizeof(float), hipMemcpyHostToDevice); //hipMemcpy(d_H, &(H[0]), n_cols * k *sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_H, &(H_t[0]), n_cols * k *sizeof(float), hipMemcpyHostToDevice); sddmm_GPU(d_row_ptr, d_row_ind, d_col_ind, d_val, d_W, d_H, d_tiled_ind, d_lastIdx, lastIdx_tile, d_lastIdx_block_tile, d_active_row, count_actv_row ,max_active_block, d_no_block_tile, new_nnz, max_active_row, d_p ); //******** correctness check float GPU_tot = 0, CPU_tot =0, CPU_tot_orig =0 ; float *p_ind_temp = new float[new_nnz]; checkCuda(hipMemcpy(&(p_ind_temp[0]), d_p, new_nnz*sizeof(float), hipMemcpyDeviceToHost),4);; for (int i = 0; i < nnz; ++i){ CPU_tot += p_ind[tiled_ind[i]]; CPU_tot_orig += p_ind[i]; // cout << "p_ind " << p_ind[tiled_ind[i]] << " " << p_ind[i] << " new,old ind: "<<tiled_ind[i] <<" "<<i<< endl; } for (int i = 200511; i < 200511+3; ++i) cout << "gp idx " << i << " " <<" GPU "<< p_ind_temp[i] << " CPU "<< p_ind[tiled_ind[i]]<<endl; for (int i = nnz-1; i > nnz-3; --i) cout << "gp idx " << i << " " <<" GPU "<< p_ind_temp[i] << " CPU "<< p_ind[tiled_ind[i]]<<endl; long diff_tot = 0; for (int i = 0; i < new_nnz; ++i){ 
if(abs(p_ind_temp[i]-p_ind[tiled_ind[i]]) > .00001){ diff_tot ++; if(diff_tot < 5) printf("CPU GPU diff %d: %f %f %f \n", i, p_ind_temp[i], p_ind[tiled_ind[i]],p_ind_temp[i]-p_ind[tiled_ind[i]] ); } } cout << "diff values in CPU and GPU: " << diff_tot << endl; //freeing device allocation hipFree( d_row_ptr ); hipFree( d_row_ind); hipFree( d_col_ind); hipFree( d_val); hipFree( d_W ); hipFree( d_H ); delete(rows); delete(cols); delete(vals); } int main(int argc, char* argv[]){ ifstream fp(argv[1]); k = atoi(argv[2]); tile_sizeX = atoi(argv[3]); string str; getline(fp,str); while(!isdigit(str[0])){ getline(fp,str); } istringstream is(str); is >> n_rows; is >> n_cols; is >> nnz; //fp >> n_rows >> n_cols >> nnz; long orig_nnz=nnz, rid=0,cid=0; float vid=0; int *rows = new int[nnz]; int *cols = new int[nnz]; float *vals = new float[nnz]; long idx=0; for (long o_idx = 0; o_idx < orig_nnz; ++o_idx) { fp >> rid >> cid >> vid; rows[idx]=rid-1; cols[idx]=cid-1; vals[idx]=vid; idx++; } actv_row_size = tile_sizeX; cout << "row, col, nnz: "<<n_rows << " "<<n_cols <<" "<< nnz << endl; cout << " tile-sizeX: " << tile_sizeX << " tile-sizeY: " << actv_row_size << " k: "<<k << endl; nnz=idx; init(rows, cols, vals); }
95376edc13906b498f67382e68febe43e2b326db.cu
#include <iostream> #include <fstream> #include <stdio.h> #include <vector> #include <algorithm> #include <iterator> #include <utility> #include <math.h> #include <omp.h> #include <cuda.h> #include "util.h" #include "kernel.h" #include <bits/stdc++.h> using namespace std; long n_rows, n_cols, nnz; int tile_sizeX, tile_sizeY, k, actv_row_size; int BLOCKSIZE=1024; inline cudaError_t checkCuda(cudaError_t result, int s){ if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error in line : %s - %d\n", cudaGetErrorString(result), s); assert(result == cudaSuccess); } return result; } void sddmm_GPU(int * d_row_ptr, int * d_row_ind, int *d_col_ind, float * d_val_ind, float * d_W, float *d_H, int *d_tiled_ind, int *d_lastIdx, int *lastIdx_tile, int *d_lastIdx_block_tile , int *d_active_row, int * count_actv_row, int &max_active_block, int *d_no_block_tile, long new_nnz, int &max_active_row, float *d_p){ int n_tile = n_cols/tile_sizeX + 1; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaStream_t stream[n_tile]; for (int i = 0; i < n_tile; i++){ cudaStreamCreate(&(stream[i])); } float mili =0, copyTime = 0 ; dim3 block(BLOCKSIZE,1,1), grid(1,1,1); //grid.x = (new_nnz + BLOCKSIZE - 1) / BLOCKSIZE; grid.x = n_cols/tile_sizeX+1; // grid.x = (32 * n_rows + BLOCKSIZE - 1) / BLOCKSIZE; checkCuda(cudaEventRecord(start), __LINE__); if(tile_sizeX == 96) comp_kernel_COO<<<grid, block, 0, stream[0]>>>(d_row_ind, d_col_ind, d_val_ind, d_W, d_H, new_nnz, n_rows, n_cols, k, d_active_row, d_lastIdx, d_lastIdx_block_tile, d_no_block_tile, max_active_block, tile_sizeX, max_active_row, d_p); else if(tile_sizeX == 192) //change orientation of W and H comp_kernel_COO_kslc16_adv<<<grid, block, 0, stream[0]>>>(d_row_ind, d_col_ind, d_val_ind, d_W, d_H, new_nnz, n_rows, n_cols, k, d_active_row, d_lastIdx, d_lastIdx_block_tile, d_no_block_tile, max_active_block, tile_sizeX, max_active_row, d_p); // comp_kernel_COO_kslc16<<<grid, block, 0, stream[0]>>>(d_row_ind, d_col_ind, d_val_ind, d_W, d_H, // new_nnz, n_rows, n_cols, k, d_active_row, d_lastIdx, d_lastIdx_block_tile, d_no_block_tile, // max_active_block, tile_sizeX, max_active_row, d_p); else if(tile_sizeX == 384) //change orientation comp_kernel_COO_kslc8<<<grid, block, 0, stream[0]>>>(d_row_ind, d_col_ind, d_val_ind, d_W, d_H, new_nnz, n_rows, n_cols, k, d_active_row, d_lastIdx, d_lastIdx_block_tile, d_no_block_tile, max_active_block, tile_sizeX, max_active_row, d_p); else if(tile_sizeX == 768) //change orientation comp_kernel_COO_kslc4<<<grid, block, 0, stream[0]>>>(d_row_ind, d_col_ind, d_val_ind, d_W, d_H, new_nnz, n_rows, n_cols, k, d_active_row, d_lastIdx, d_lastIdx_block_tile, d_no_block_tile, max_active_block, tile_sizeX, max_active_row, d_p); else if(tile_sizeX >= 1024) //change orientation comp_kernel_COO_DGEMM<<<grid, block, 0, stream[0]>>>(d_row_ind, d_col_ind, d_val_ind, d_W, d_H, new_nnz, n_rows, n_cols, k, d_active_row, d_lastIdx, d_lastIdx_block_tile, d_no_block_tile, max_active_block, tile_sizeX, max_active_row, d_p); checkCuda(cudaEventRecord(stop), __LINE__); cudaEventSynchronize(stop); //cudaDeviceSynchronize(); checkCuda(cudaEventElapsedTime(&mili, start, stop), __LINE__); cudaDeviceSynchronize(); cout << "GPU time " << mili << "ms"<< endl; } void sddmm_CPU_CSR(int * row_ptr, int *col_ind, float * val_ind, float * W, float *H, float * p_ind){ // reduction(+:rmse) long tot =0 ; #pragma omp parallel for reduction(+:tot) for (int r = 0; r < n_rows; ++r){ tot += row_ptr[r+1] - row_ptr[r]; float sm =0 ; for (int ind = 
row_ptr[r]; ind < row_ptr[r+1]; ++ind){ int row = r; int col = col_ind[ind]; int nnz = row_ptr[r+1]-row_ptr[r]; float val = val_ind[ind]; sm=0; for (int t = 0; t < k; ++t){ sm += W[row * k + t] * H[col * k + t]; // cout <<W[row * k + t] <<" "<<H[col * k + t]<< endl; } p_ind[ind] = sm * val_ind[ind]; // cout << "ind " << row<<" "<<col << ":: " <<" "<< p_ind[ind] << " = " << sm <<" * "<< val_ind[ind]<< endl; } } cout << "CPU tot " << tot << endl; for (int r = 500000; r < 500005; ++r) // for (int ind = row_ptr[r]; ind < row_ptr[r+1]; ++ind) cout << "row " << r << " " <<" "<< p_ind[r]<< endl; } void sddmm_CPU_COO(int * row_ind, int *col_ind, float * val_ind, float * W, float *H, float * p_ind){ // reduction(+:rmse) double start_time = omp_get_wtime(); omp_set_dynamic(0); omp_set_num_threads(28); #pragma omp parallel for //reduction(+:tot) for (int ind = 0; ind < nnz; ind++){ float sm =0 ; int row = row_ind[ind]; int col = col_ind[ind]; for (int t = 0; t < k; ++t) sm += W[row * k + t] * H[col * k + t]; p_ind[ind] = sm * val_ind[ind]; // cout << "ind " << row<<" "<<col << ":: " <<" "<< p_ind[ind] << " = " << sm <<" * "<< val_ind[ind]<< endl; // } } double CPU_time = omp_get_wtime() - start_time; //correctness check printf("\nomp time CPU : %.4f \n\n", CPU_time*1000); } void init(int *rows, int *cols, float* vals){ int n_bin=10; int *count = new int[n_bin]; int *row_ptr = new int[n_rows+1]; float *p_ind = new float[nnz]; float *W = new float[n_rows*k]; float *W_t = new float[n_rows*k]; float *H = new float[n_cols*k]; float *H_t = new float[n_cols*k]; int n_tile_c = n_cols/tile_sizeX + 1; int n_tile_r = n_rows/tile_sizeY + 1; int max_active_block = (n_rows/actv_row_size+1); int *count_actv_row = new int[n_tile_c]; int *no_block_tile = new int[n_tile_c]; int *lastIdx_tile = new int[n_tile_c+1]; int *lastIdx_block_tile = new int[(n_tile_c+1) * (n_rows/actv_row_size+1)]; float *d_val, *d_W, *d_H, *d_W_t, *d_p; int *d_row_ptr, *d_col_ind, *d_row_ind, *d_tiled_ind, *d_lastIdx, *d_active_row, *d_lastIdx_block_tile, *d_no_block_tile; int n_tileX = n_cols/tile_sizeX+1; int n_tileY = n_rows/tile_sizeY+1; long new_nnz =0 ; initial(W, n_rows, k); initial(H, n_cols, k); make_HTasH(H, H_t, n_cols, k); make_HTasH(W, W_t, n_rows, k); cout << n_cols <<" "<<n_tile_c << endl; int *new_rows = new int[nnz ]; int *new_cols = new int[nnz ]; float *new_vals = new float[nnz]; int *tiled_ind = new int [nnz ]; int *active_row = new int[n_tileX * n_rows]; // int *new_rows = new int[nnz]; // int *new_cols = new int[nnz]; // float *new_vals = new float[nnz]; //converting col sorted matrix to row sorted //unsorted_make_CSR(rows, cols, vals, nnz, n_rows, n_cols, row_ptr); //assuming sorted make_CSR(rows, cols, vals, nnz, n_rows, row_ptr); //comp_bin(n_bin, count, n_rows, row_ptr, nnz); int max_active_row=0; max_active_row = rewrite_matrix_1D(row_ptr, rows, cols, vals, new_rows, new_cols, new_vals, nnz, n_rows, n_cols, tile_sizeX, tiled_ind, lastIdx_tile, active_row, lastIdx_block_tile, count_actv_row, actv_row_size, new_nnz, no_block_tile, actv_row_size); // write_mat(row_ptr, rows, cols, vals, new_rows, new_cols, new_vals, nnz, n_rows, n_cols, // tile_sizeX, tile_sizeY, tiled_ind, lastIdx_tile, BLOCKSIZE, new_nnz); // rewrite_col_sorted_matrix(row_ptr, rows, cols, vals, new_rows, new_cols, new_vals, nnz, n_rows, n_cols, // tile_sizeX, tiled_ind, lastIdx_tile, BLOCKSIZE, new_nnz); double t0 = seconds(); // sddmm_CPU_CSR(row_ptr, cols, vals, W, H, p_ind); sddmm_CPU_COO(rows, cols, vals, W, H, p_ind); //***********Starting 
GPU**************** checkCuda(cudaMalloc((void**)&d_W, k*n_rows*sizeof(float)),0); checkCuda(cudaMalloc((void**)&d_H, k*n_cols*sizeof(float)),1); // checkCuda(cudaMalloc((void**)&d_row_ptr, (n_rows+1)*sizeof(int)),2); checkCuda(cudaMalloc((void**)&d_row_ind, new_nnz*sizeof(int)),4); if(actv_row_size > 256) checkCuda(cudaMalloc((void**)&d_col_ind, new_nnz*sizeof(int)),4); checkCuda(cudaMalloc((void**)&d_val, new_nnz*sizeof(float)),4); checkCuda(cudaMalloc((void**)&d_p, new_nnz*sizeof(float)),4); checkCuda(cudaMalloc((void**)&d_lastIdx, (n_tile_c+1)*sizeof(float)),4); checkCuda(cudaMalloc((void**)&d_no_block_tile, (n_tile_c)*sizeof(float)),4); checkCuda(cudaMalloc((void**)&d_active_row, n_tileX*max_active_row*sizeof(int)),4); checkCuda(cudaMemcpy(d_row_ind, &(new_rows[0]), new_nnz*sizeof(int), cudaMemcpyHostToDevice),4); if(actv_row_size > 256) checkCuda(cudaMemcpy(d_col_ind, &(new_cols[0]), new_nnz*sizeof(int), cudaMemcpyHostToDevice),4); checkCuda(cudaMemcpy(d_val, &(new_vals[0]), new_nnz*sizeof(float), cudaMemcpyHostToDevice),4); checkCuda(cudaMemcpy(d_lastIdx, &(lastIdx_tile[0]), (n_tile_c+1)*sizeof(int), cudaMemcpyHostToDevice),4); checkCuda(cudaMalloc((void**)&d_lastIdx_block_tile, n_tileX*max_active_block*sizeof(int)),4); checkCuda(cudaMemcpy(d_no_block_tile, &(no_block_tile[0]), (n_tile_c)*sizeof(int), cudaMemcpyHostToDevice),4); cudaMemset(d_p, 0, new_nnz*sizeof(float)); for (int i = 0; i < n_tileX; ++i) checkCuda(cudaMemcpy(d_lastIdx_block_tile+i*max_active_block, &(lastIdx_block_tile[i*max_active_block]), max_active_block*sizeof(int), cudaMemcpyHostToDevice),4); int sum =0 ; // for (int i = 0; i < n_tileX; ++i){ // checkCuda(cudaMemcpy(d_active_row+sum, &(active_row[i*n_rows]), count_actv_row[i]*sizeof(int), cudaMemcpyHostToDevice),4); // sum += count_actv_row[i]; // } for (int i = 0; i < n_tileX; ++i){ checkCuda(cudaMemcpy(d_active_row+sum, &(active_row[i*n_rows]), max_active_row*sizeof(int), cudaMemcpyHostToDevice),4); sum += max_active_row; } // checkCuda(cudaMemcpy(d_tiled_ind, &(tiled_ind[0]), nnz*sizeof(int), cudaMemcpyHostToDevice),4);; //cudaMemcpy(d_W, &(W[0]), n_rows * k *sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_W, &(W_t[0]), n_rows * k *sizeof(float), cudaMemcpyHostToDevice); //cudaMemcpy(d_H, &(H[0]), n_cols * k *sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_H, &(H_t[0]), n_cols * k *sizeof(float), cudaMemcpyHostToDevice); sddmm_GPU(d_row_ptr, d_row_ind, d_col_ind, d_val, d_W, d_H, d_tiled_ind, d_lastIdx, lastIdx_tile, d_lastIdx_block_tile, d_active_row, count_actv_row ,max_active_block, d_no_block_tile, new_nnz, max_active_row, d_p ); //******** correctness check float GPU_tot = 0, CPU_tot =0, CPU_tot_orig =0 ; float *p_ind_temp = new float[new_nnz]; checkCuda(cudaMemcpy(&(p_ind_temp[0]), d_p, new_nnz*sizeof(float), cudaMemcpyDeviceToHost),4);; for (int i = 0; i < nnz; ++i){ CPU_tot += p_ind[tiled_ind[i]]; CPU_tot_orig += p_ind[i]; // cout << "p_ind " << p_ind[tiled_ind[i]] << " " << p_ind[i] << " new,old ind: "<<tiled_ind[i] <<" "<<i<< endl; } for (int i = 200511; i < 200511+3; ++i) cout << "gp idx " << i << " " <<" GPU "<< p_ind_temp[i] << " CPU "<< p_ind[tiled_ind[i]]<<endl; for (int i = nnz-1; i > nnz-3; --i) cout << "gp idx " << i << " " <<" GPU "<< p_ind_temp[i] << " CPU "<< p_ind[tiled_ind[i]]<<endl; long diff_tot = 0; for (int i = 0; i < new_nnz; ++i){ if(abs(p_ind_temp[i]-p_ind[tiled_ind[i]]) > .00001){ diff_tot ++; if(diff_tot < 5) printf("CPU GPU diff %d: %f %f %f \n", i, p_ind_temp[i], 
p_ind[tiled_ind[i]],p_ind_temp[i]-p_ind[tiled_ind[i]] ); } } cout << "diff values in CPU and GPU: " << diff_tot << endl; //freeing device allocation cudaFree( d_row_ptr ); cudaFree( d_row_ind); cudaFree( d_col_ind); cudaFree( d_val); cudaFree( d_W ); cudaFree( d_H ); delete(rows); delete(cols); delete(vals); } int main(int argc, char* argv[]){ ifstream fp(argv[1]); k = atoi(argv[2]); tile_sizeX = atoi(argv[3]); string str; getline(fp,str); while(!isdigit(str[0])){ getline(fp,str); } istringstream is(str); is >> n_rows; is >> n_cols; is >> nnz; //fp >> n_rows >> n_cols >> nnz; long orig_nnz=nnz, rid=0,cid=0; float vid=0; int *rows = new int[nnz]; int *cols = new int[nnz]; float *vals = new float[nnz]; long idx=0; for (long o_idx = 0; o_idx < orig_nnz; ++o_idx) { fp >> rid >> cid >> vid; rows[idx]=rid-1; cols[idx]=cid-1; vals[idx]=vid; idx++; } actv_row_size = tile_sizeX; cout << "row, col, nnz: "<<n_rows << " "<<n_cols <<" "<< nnz << endl; cout << " tile-sizeX: " << tile_sizeX << " tile-sizeY: " << actv_row_size << " k: "<<k << endl; nnz=idx; init(rows, cols, vals); }
5fe49149053e7e6985a39d23bc65b7e3aeea5c7d.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <time.h>

#define __DEBUG
#define VSQR 0.1
#define TSCALE 1.0

#define CUDA_CALL( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CUDA_CHK_ERR() __cudaCheckError(__FILE__,__LINE__)

extern int tpdt(double *t, double dt, double end_time);

/**************************************
 * void __cudaSafeCall(hipError_t err, const char *file, const int line)
 * void __cudaCheckError(const char *file, const int line)
 *
 * These routines were taken from the GPU Computing SDK
 * (http://developer.nvidia.com/gpu-computing-sdk) include file "cutil.h"
 **************************************/
inline void __cudaSafeCall( hipError_t err, const char *file, const int line )
{
#ifdef __DEBUG
#pragma warning( push )
#pragma warning( disable: 4127 ) // Prevent warning on do-while(0);
    do
    {
        if ( hipSuccess != err )
        {
            fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
                     file, line, hipGetErrorString( err ) );
            exit( -1 );
        }
    } while ( 0 );
#pragma warning( pop )
#endif // __DEBUG
    return;
}

inline void __cudaCheckError( const char *file, const int line )
{
#ifdef __DEBUG
#pragma warning( push )
#pragma warning( disable: 4127 ) // Prevent warning on do-while(0);
    do
    {
        hipError_t err = hipGetLastError();
        if ( hipSuccess != err )
        {
            fprintf( stderr, "cudaCheckError() failed at %s:%i : %s.\n",
                     file, line, hipGetErrorString( err ) );
            exit( -1 );
        }
        // More careful checking. However, this will affect performance.
        // Comment if not needed.
        /*err = hipDeviceSynchronize();
        if( hipSuccess != err )
        {
            fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s.\n",
                     file, line, hipGetErrorString( err ) );
            exit( -1 );
        }*/
    } while ( 0 );
#pragma warning( pop )
#endif // __DEBUG
    return;
}

__device__ double f_CUDA(double p, double t)
{
    return -__expf(-TSCALE * t) * p;
}

__global__ void evolve9ptCUDA(double *un, double *uc, double *uo, double *pebbles, int n, double h, double dt, double t)
{
    int idx = (blockIdx.x * gridDim.x + blockIdx.y) * blockDim.x * blockDim.y + threadIdx.x * blockDim.x + threadIdx.y;
    int i = idx / n;
    int j = idx % n;

    if(!(i == 0 || i == n - 1 || j == 0 || j == n - 1))
        un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((uc[idx-1] + uc[idx+1] + uc[idx + n] + uc[idx - n]
                  + 0.25*(uc[idx + n - 1] + uc[idx + n + 1] + uc[idx - n - 1] + uc[idx - n + 1])
                  - 5 * uc[idx])/(h * h) + f_CUDA(pebbles[idx],t));
    else
        un[idx] = 0.;
}

void run_gpu(double *u, double *u0, double *u1, double *pebbles, int n, double h, double end_time, int nthreads)
{
    hipEvent_t kstart, kstop;
    float ktime;

    double *un, *uc, *uo, *pb, *temp;
    double t, dt;

    /* Set up device timers */
    CUDA_CALL(hipSetDevice(0));
    CUDA_CALL(hipEventCreate(&kstart));
    CUDA_CALL(hipEventCreate(&kstop));

    t = 0.;
    dt = h/2.;

    hipMalloc((void **)&un, sizeof(double) * n * n);
    hipMalloc((void **)&uc, sizeof(double) * n * n);
    hipMalloc((void **)&uo, sizeof(double) * n * n);
    hipMalloc((void **)&pb, sizeof(double) * n * n);

    hipMemcpy(uo, u0, sizeof(double) * n * n, hipMemcpyHostToDevice);
    hipMemcpy(uc, u1, sizeof(double) * n * n, hipMemcpyHostToDevice);
    hipMemcpy(pb, pebbles, sizeof(double) * n * n, hipMemcpyHostToDevice);

    dim3 block_dim(nthreads, nthreads,1);
    dim3 grid_dim(n/nthreads, n/nthreads,1);

    /* Start GPU computation timer */
    CUDA_CALL(hipEventRecord(kstart, 0));

    while(1)
    {
        hipLaunchKernelGGL(( evolve9ptCUDA), dim3(grid_dim), dim3(block_dim), 0, 0, un, uc, uo, pb, n, h, dt, t);

        temp = uc;
        uc = un;
        un = uo;
        uo = temp;

        if(!tpdt(&t, dt, end_time))
            break;
    }

    hipMemcpy(u, uc, sizeof(double) * n * n, hipMemcpyDeviceToHost);

    /* Stop GPU computation timer */
    CUDA_CALL(hipEventRecord(kstop, 0));
    CUDA_CALL(hipEventSynchronize(kstop));
    CUDA_CALL(hipEventElapsedTime(&ktime, kstart, kstop));
    printf("GPU computation: %f msec\n", ktime);

    hipFree(un);
    hipFree(uc);
    hipFree(uo);
    hipFree(pb);

    /* timer cleanup */
    CUDA_CALL(hipEventDestroy(kstart));
    CUDA_CALL(hipEventDestroy(kstop));
}
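For readers scanning the kernel above (it is identical in the CUDA original that follows), the update rule it encodes is worth stating once in closed form. Reading it directly off evolve9ptCUDA, with VSQR = 0.1 as v^2 and TSCALE = 1.0, each interior grid point is advanced by a 9-point finite-difference step; written in LaTeX for readability, this is only a restatement of the arithmetic already in the kernel:

u_{i,j}^{\,t+\Delta t} = 2\,u_{i,j}^{\,t} - u_{i,j}^{\,t-\Delta t}
  + v^2\,\Delta t^2\left[\frac{u_{i-1,j}+u_{i+1,j}+u_{i,j-1}+u_{i,j+1}
  + \tfrac{1}{4}\bigl(u_{i-1,j-1}+u_{i-1,j+1}+u_{i+1,j-1}+u_{i+1,j+1}\bigr)
  - 5\,u_{i,j}}{h^2} + f(p_{i,j},t)\right],
\qquad f(p,t) = -e^{-t}\,p,

with the boundary rows and columns pinned to zero by the else branch.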
5fe49149053e7e6985a39d23bc65b7e3aeea5c7d.cu
#include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include <time.h>

#define __DEBUG
#define VSQR 0.1
#define TSCALE 1.0

#define CUDA_CALL( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CUDA_CHK_ERR() __cudaCheckError(__FILE__,__LINE__)

extern int tpdt(double *t, double dt, double end_time);

/**************************************
 * void __cudaSafeCall(cudaError err, const char *file, const int line)
 * void __cudaCheckError(const char *file, const int line)
 *
 * These routines were taken from the GPU Computing SDK
 * (http://developer.nvidia.com/gpu-computing-sdk) include file "cutil.h"
 **************************************/
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef __DEBUG
#pragma warning( push )
#pragma warning( disable: 4127 ) // Prevent warning on do-while(0);
    do
    {
        if ( cudaSuccess != err )
        {
            fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
                     file, line, cudaGetErrorString( err ) );
            exit( -1 );
        }
    } while ( 0 );
#pragma warning( pop )
#endif // __DEBUG
    return;
}

inline void __cudaCheckError( const char *file, const int line )
{
#ifdef __DEBUG
#pragma warning( push )
#pragma warning( disable: 4127 ) // Prevent warning on do-while(0);
    do
    {
        cudaError_t err = cudaGetLastError();
        if ( cudaSuccess != err )
        {
            fprintf( stderr, "cudaCheckError() failed at %s:%i : %s.\n",
                     file, line, cudaGetErrorString( err ) );
            exit( -1 );
        }
        // More careful checking. However, this will affect performance.
        // Comment if not needed.
        /*err = cudaThreadSynchronize();
        if( cudaSuccess != err )
        {
            fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s.\n",
                     file, line, cudaGetErrorString( err ) );
            exit( -1 );
        }*/
    } while ( 0 );
#pragma warning( pop )
#endif // __DEBUG
    return;
}

__device__ double f_CUDA(double p, double t)
{
    return -__expf(-TSCALE * t) * p;
}

__global__ void evolve9ptCUDA(double *un, double *uc, double *uo, double *pebbles, int n, double h, double dt, double t)
{
    int idx = (blockIdx.x * gridDim.x + blockIdx.y) * blockDim.x * blockDim.y + threadIdx.x * blockDim.x + threadIdx.y;
    int i = idx / n;
    int j = idx % n;

    if(!(i == 0 || i == n - 1 || j == 0 || j == n - 1))
        un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((uc[idx-1] + uc[idx+1] + uc[idx + n] + uc[idx - n]
                  + 0.25*(uc[idx + n - 1] + uc[idx + n + 1] + uc[idx - n - 1] + uc[idx - n + 1])
                  - 5 * uc[idx])/(h * h) + f_CUDA(pebbles[idx],t));
    else
        un[idx] = 0.;
}

void run_gpu(double *u, double *u0, double *u1, double *pebbles, int n, double h, double end_time, int nthreads)
{
    cudaEvent_t kstart, kstop;
    float ktime;

    double *un, *uc, *uo, *pb, *temp;
    double t, dt;

    /* Set up device timers */
    CUDA_CALL(cudaSetDevice(0));
    CUDA_CALL(cudaEventCreate(&kstart));
    CUDA_CALL(cudaEventCreate(&kstop));

    t = 0.;
    dt = h/2.;

    cudaMalloc((void **)&un, sizeof(double) * n * n);
    cudaMalloc((void **)&uc, sizeof(double) * n * n);
    cudaMalloc((void **)&uo, sizeof(double) * n * n);
    cudaMalloc((void **)&pb, sizeof(double) * n * n);

    cudaMemcpy(uo, u0, sizeof(double) * n * n, cudaMemcpyHostToDevice);
    cudaMemcpy(uc, u1, sizeof(double) * n * n, cudaMemcpyHostToDevice);
    cudaMemcpy(pb, pebbles, sizeof(double) * n * n, cudaMemcpyHostToDevice);

    dim3 block_dim(nthreads, nthreads,1);
    dim3 grid_dim(n/nthreads, n/nthreads,1);

    /* Start GPU computation timer */
    CUDA_CALL(cudaEventRecord(kstart, 0));

    while(1)
    {
        evolve9ptCUDA<<<grid_dim, block_dim>>>(un, uc, uo, pb, n, h, dt, t);

        temp = uc;
        uc = un;
        un = uo;
        uo = temp;

        if(!tpdt(&t, dt, end_time))
            break;
    }

    cudaMemcpy(u, uc, sizeof(double) * n * n, cudaMemcpyDeviceToHost);

    /* Stop GPU computation timer */
    CUDA_CALL(cudaEventRecord(kstop, 0));
    CUDA_CALL(cudaEventSynchronize(kstop));
    CUDA_CALL(cudaEventElapsedTime(&ktime, kstart, kstop));
    printf("GPU computation: %f msec\n", ktime);

    cudaFree(un);
    cudaFree(uc);
    cudaFree(uo);
    cudaFree(pb);

    /* timer cleanup */
    CUDA_CALL(cudaEventDestroy(kstart));
    CUDA_CALL(cudaEventDestroy(kstop));
}
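Neither listing in this pair includes a host driver or the tpdt() time-stepper it declares extern, so the files are not runnable on their own. The sketch below shows one way a driver could look; the tpdt semantics (advance *t by dt until end_time), the single centre pebble, and the grid sizes are illustrative assumptions, not part of the original assignment code.

/* Hypothetical driver -- not part of the original file.  Assumes it is
 * compiled together with the CUDA listing above, e.g. nvcc lake.cu main.cu. */
#include <stdlib.h>
#include <stdio.h>

void run_gpu(double *u, double *u0, double *u1, double *pebbles, int n, double h, double end_time, int nthreads);

/* Assumed semantics: advance *t by dt and report whether end_time was reached. */
int tpdt(double *t, double dt, double end_time)
{
    if ((*t) + dt > end_time)
        return 0;
    (*t) += dt;
    return 1;
}

int main(void)
{
    const int n = 128, nthreads = 16;       /* n must be divisible by nthreads, since grid_dim = n/nthreads */
    const double h = 1.0 / n, end_time = 1.0;

    double *u       = (double *)calloc(n * n, sizeof(double));
    double *u0      = (double *)calloc(n * n, sizeof(double));
    double *u1      = (double *)calloc(n * n, sizeof(double));
    double *pebbles = (double *)calloc(n * n, sizeof(double));

    pebbles[(n / 2) * n + n / 2] = 1.0;     /* one pebble in the centre (illustrative) */

    run_gpu(u, u0, u1, pebbles, n, h, end_time, nthreads);
    printf("u at centre after %f s: %f\n", end_time, u[(n / 2) * n + n / 2]);

    free(u); free(u0); free(u1); free(pebbles);
    return 0;
}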
319f796881717e2b20764a87786961665b753665.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct hipComplex { float r; float i; __device__ hipComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ hipComplex operator*(const hipComplex& a) { return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ hipComplex operator-(const hipComplex& a) { return hipComplex(r-a.r, i-a.i); } __device__ hipComplex operator+(const hipComplex& a) { return hipComplex(r+a.r, i+a.i); } __device__ hipComplex operator/(const hipComplex& a) { return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ hipComplex conj(hipComplex m) { hipComplex out(m.r,-m.i); return out; } __device__ hipComplex nor(hipComplex m) { hipComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(hipComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ hipComplex qpoch(hipComplex a, hipComplex q) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex qp(hipComplex a, hipComplex q, int n) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex ramphi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ hipComplex rampsi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ hipComplex ramchi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q*q); } __device__ hipComplex ramf(hipComplex a, hipComplex b) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex ma = mone*a; hipComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ hipComplex expc(hipComplex m) { hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ hipComplex powc(hipComplex ag, hipComplex bg) { hipComplex out(0.0,0.0); hipComplex mesp(0.0,0.0); hipComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ hipComplex cosc(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.5,0.0); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ hipComplex sins(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.0,0.5); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; 
} __device__ hipComplex tans(hipComplex m) { return sins(m)/cosc(m); } __device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z) { hipComplex out(0.0,0.0); hipComplex ai(0.0,1.0); hipComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ hipComplex bnewt(hipComplex z) { hipComplex three(3.0,0.0); hipComplex unity(1.0,0.0); hipComplex out(0.0,0.0); hipComplex Z =z; hipComplex L(0.0,0.0); hipComplex R(0.62348980185873359,0.7818314824680298); hipComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ hipComplex they3(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex wahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ hipComplex dwahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ hipComplex they3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex h3ey3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex aut(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); hipComplex vel(0.0,0.0); hipComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn = enn + onn; } return out/aut; } __device__ hipComplex thess(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the1(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*sins(z); } __device__ hipComplex the2(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*cosc(z); } __device__ hipComplex the3(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { 
qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ hipComplex qin(hipComplex a, hipComplex q) { hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ hipComplex geffa(hipComplex z, hipComplex q) { hipComplex out(0.0,0.0); hipComplex unity(1.0,0.0); hipComplex wu(0.0,0.0); hipComplex Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ hipComplex thratd(hipComplex z, hipComplex q) { int n; hipComplex fau(4.0,0.0); hipComplex too(2.0,0.0); hipComplex unity(1.0,0.0); hipComplex ennn(1.0,0.0); hipComplex ni(-1.0,0.0); hipComplex noo(-1.0,0.0); hipComplex out(0.0,0.0); hipComplex loo = q; hipComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ hipComplex thess4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ hipComplex thass(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex rogers( hipComplex q) { hipComplex onf(0.2,0.0); hipComplex Q5 = q*q*q*q*q; hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ hipComplex flat(hipComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); hipComplex out(m.r/ua,m.i/ua); return out; } __device__ hipComplex eff(hipComplex z, hipComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ hipComplex thete(float R, hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); hipComplex ann(1.0,0.0); hipComplex bnn(1.0,0.0); hipComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * 
cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ hipComplex thetta(hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the hipComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ hipComplex mitlef(hipComplex z,hipComplex c) { hipComplex out(0.0,0.0); hipComplex Z(1.0,0.0); hipComplex frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ hipComplex helva(hipComplex z) { hipComplex out(j0f(z.r),j1f(z.i)); return out; } __device__ hipComplex hilva(hipComplex z) { hipComplex out(j1f(z.r),j0f(z.i)); return out; } __device__ hipComplex hinva(hipComplex z) { hipComplex out(j1f(z.r),j1f(z.i)); return out; } __device__ hipComplex henga(hipComplex z) { hipComplex out(acoshf(z.r),asinhf(z.i)); return out; } __device__ hipComplex holva(hipComplex z) { hipComplex out(y0f(z.r),y1f(z.i)); return out; } __device__ hipComplex arago(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex irigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex urigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 
0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; hipComplex ip(pi,0.0); const float scale = 3; float fx = -scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); hipComplex effx(fx,0.0); hipComplex effy(fy,0.0); float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); hipComplex mouse(LA,LB); hipComplex moux(LA,0.0); hipComplex mouy(0.0,LB); hipComplex q(fx,fy); /* hipComplex tik(sin(ticks/40.0f),0.0);*/ /* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ hipComplex fixon(.029348,.828934); hipComplex faxon(.029348,-.828934); hipComplex unity(1.0,0.0); hipComplex ai(0.0,1.0); hipComplex aon = expc(ai*moux); hipComplex uon= expc(mouy); hipComplex flurn(0.0,0.0); hipComplex accume(0.0,0.0); hipComplex eccume(0.0,0.0); hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0); hipComplex cue = q; hipComplex lam(0.73736887807831963, -0.67549029426152396); hipComplex due(3.0,0.0); hipComplex tir(2.0,0.0); hipComplex selga(3.5,0.0); hipComplex vro(-1.0,0.0); hipComplex tle(1.0,0.0); hipComplex sle(4.0,0.0); hipComplex cherra(0.62348980185873359, 0.7818314824680298); hipComplex lerra = cherra*cherra; hipComplex ferra = lerra * cherra; hipComplex terra = ferra * cherra; hipComplex zerra = terra * cherra; hipComplex nerra = zerra * cherra; hipComplex vlarv(1/3.0,0.0); hipComplex sugna(0.70710678118654757, 0.70710678118654746); hipComplex regna(0.99966573338968745, 0.025853848581176047); hipComplex spa(sqrtf(2.0),0.0); hipComplex spb(sqrtf(3.0),0.0); hipComplex spc(sqrtf(4.0),0.0); hipComplex spd(sqrtf(5.0),0.0); hipComplex mrun(1/2.0,0.0); hipComplex gloon (4.0,0.0); hipComplex plenod(-.01,0.0); hipComplex nue = cue; hipComplex bor(-10.0,0.0); hipComplex nat(0.0,-10.0); hipComplex rhus(1.0,0.0); hipComplex D(0.739085133215160641655312087674,0.0); /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ for(v=0;v<5;v++) { cue = (cue - uon*conj(aon-hilva(cue)))/(cue + aon*conj(uon*helva(cue))); cue = (cue + uon*conj(aon-hilva(cue)))/(cue -aon*conj(uon*helva(cue))); cue = (cue - uon*conj(aon-hilva(cue))); accume = accume + aon*conj(uon*helva(cue)); /*accume = accume + urigo(powc(cue * aon - conj(cue*uon),aon),uon*fixon);*/ } cue = accume; for(v=0;v<5;v++) { cue = (cue - uon*conj(aon-hilva(cue)))/(cue + aon*conj(uon*helva(cue))); cue = (cue + uon*conj(aon-hilva(cue)))/(cue -aon*conj(uon*helva(cue))); cue = (cue - uon*conj(aon-hilva(cue))); accume = accume + aon*conj(uon*helva(cue)); /*accume = accume + urigo(powc(cue * aon - conj(cue*uon),aon),uon*fixon);*/ } accume = uon*the3(q,flat(accume)*fixon*aon)/powc(conj(the3(q,flat(accume)*faxon*uon)),uon); cue = hilva(accume); double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) 
(255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */
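A brief aside on the powc helper defined near the top of this file, since it is the piece most readers pause over: it evaluates a complex power through the polar form. With ag = r e^{i\theta} and bg = c + d i, the two factors mesp and frim computed in the code multiply out to the principal value

ag^{bg} = \bigl(r e^{i\theta}\bigr)^{c+di}
        = r^{c}\, e^{-d\theta}\,\bigl(\cos(c\theta + d\ln r) + i\,\sin(c\theta + d\ln r)\bigr),
\qquad r = |ag|,\ \ \theta = \operatorname{atan2}(\mathrm{Im}\,ag,\ \mathrm{Re}\,ag),

where mesp carries r^{c+di} and frim carries e^{i\theta(c+di)}. This is a restatement of what the code already computes, not a change to it.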
319f796881717e2b20764a87786961665b753665.cu
#include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct cuComplex { float r; float i; __device__ cuComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ cuComplex operator*(const cuComplex& a) { return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ cuComplex operator-(const cuComplex& a) { return cuComplex(r-a.r, i-a.i); } __device__ cuComplex operator+(const cuComplex& a) { return cuComplex(r+a.r, i+a.i); } __device__ cuComplex operator/(const cuComplex& a) { return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ cuComplex conj(cuComplex m) { cuComplex out(m.r,-m.i); return out; } __device__ cuComplex nor(cuComplex m) { cuComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(cuComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ cuComplex qpoch(cuComplex a, cuComplex q) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex qp(cuComplex a, cuComplex q, int n) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex ramphi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ cuComplex rampsi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ cuComplex ramchi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q*q); } __device__ cuComplex ramf(cuComplex a, cuComplex b) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex ma = mone*a; cuComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ cuComplex expc(cuComplex m) { cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ cuComplex powc(cuComplex ag, cuComplex bg) { cuComplex out(0.0,0.0); cuComplex mesp(0.0,0.0); cuComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ cuComplex cosc(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.5,0.0); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ cuComplex sins(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.0,0.5); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; } __device__ cuComplex tans(cuComplex m) { return sins(m)/cosc(m); } __device__ cuComplex moeb(cuComplex t, cuComplex a, cuComplex z) { cuComplex out(0.0,0.0); 
cuComplex ai(0.0,1.0); cuComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ cuComplex bnewt(cuComplex z) { cuComplex three(3.0,0.0); cuComplex unity(1.0,0.0); cuComplex out(0.0,0.0); cuComplex Z =z; cuComplex L(0.0,0.0); cuComplex R(0.62348980185873359,0.7818314824680298); cuComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ cuComplex they3(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex wahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ cuComplex dwahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ cuComplex they3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex h3ey3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex aut(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); cuComplex vel(0.0,0.0); cuComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn = enn + onn; } return out/aut; } __device__ cuComplex thess(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the1(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*sins(z); } __device__ cuComplex the2(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*cosc(z); } __device__ cuComplex the3(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ cuComplex qin(cuComplex a, cuComplex q) { cuComplex unity(1.0,0.0); cuComplex 
out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ cuComplex geffa(cuComplex z, cuComplex q) { cuComplex out(0.0,0.0); cuComplex unity(1.0,0.0); cuComplex wu(0.0,0.0); cuComplex Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ cuComplex thratd(cuComplex z, cuComplex q) { int n; cuComplex fau(4.0,0.0); cuComplex too(2.0,0.0); cuComplex unity(1.0,0.0); cuComplex ennn(1.0,0.0); cuComplex ni(-1.0,0.0); cuComplex noo(-1.0,0.0); cuComplex out(0.0,0.0); cuComplex loo = q; cuComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ cuComplex thess4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ cuComplex thass(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex rogers( cuComplex q) { cuComplex onf(0.2,0.0); cuComplex Q5 = q*q*q*q*q; cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ cuComplex flat(cuComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); cuComplex out(m.r/ua,m.i/ua); return out; } __device__ cuComplex eff(cuComplex z, cuComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ cuComplex thete(float R, cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); cuComplex ann(1.0,0.0); cuComplex bnn(1.0,0.0); cuComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ cuComplex thetta(cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about 
whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the cuComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ cuComplex mitlef(cuComplex z,cuComplex c) { cuComplex out(0.0,0.0); cuComplex Z(1.0,0.0); cuComplex frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ cuComplex helva(cuComplex z) { cuComplex out(j0f(z.r),j1f(z.i)); return out; } __device__ cuComplex hilva(cuComplex z) { cuComplex out(j1f(z.r),j0f(z.i)); return out; } __device__ cuComplex hinva(cuComplex z) { cuComplex out(j1f(z.r),j1f(z.i)); return out; } __device__ cuComplex henga(cuComplex z) { cuComplex out(acoshf(z.r),asinhf(z.i)); return out; } __device__ cuComplex holva(cuComplex z) { cuComplex out(y0f(z.r),y1f(z.i)); return out; } __device__ cuComplex arago(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex irigo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex urigo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 
0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; cuComplex ip(pi,0.0); const float scale = 3; float fx = -scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); cuComplex effx(fx,0.0); cuComplex effy(fy,0.0); float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); cuComplex mouse(LA,LB); cuComplex moux(LA,0.0); cuComplex mouy(0.0,LB); cuComplex q(fx,fy); /* cuComplex tik(sin(ticks/40.0f),0.0);*/ /* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ cuComplex fixon(.029348,.828934); cuComplex faxon(.029348,-.828934); cuComplex unity(1.0,0.0); cuComplex ai(0.0,1.0); cuComplex aon = expc(ai*moux); cuComplex uon= expc(mouy); cuComplex flurn(0.0,0.0); cuComplex accume(0.0,0.0); cuComplex eccume(0.0,0.0); cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0); cuComplex cue = q; cuComplex lam(0.73736887807831963, -0.67549029426152396); cuComplex due(3.0,0.0); cuComplex tir(2.0,0.0); cuComplex selga(3.5,0.0); cuComplex vro(-1.0,0.0); cuComplex tle(1.0,0.0); cuComplex sle(4.0,0.0); cuComplex cherra(0.62348980185873359, 0.7818314824680298); cuComplex lerra = cherra*cherra; cuComplex ferra = lerra * cherra; cuComplex terra = ferra * cherra; cuComplex zerra = terra * cherra; cuComplex nerra = zerra * cherra; cuComplex vlarv(1/3.0,0.0); cuComplex sugna(0.70710678118654757, 0.70710678118654746); cuComplex regna(0.99966573338968745, 0.025853848581176047); cuComplex spa(sqrtf(2.0),0.0); cuComplex spb(sqrtf(3.0),0.0); cuComplex spc(sqrtf(4.0),0.0); cuComplex spd(sqrtf(5.0),0.0); cuComplex mrun(1/2.0,0.0); cuComplex gloon (4.0,0.0); cuComplex plenod(-.01,0.0); cuComplex nue = cue; cuComplex bor(-10.0,0.0); cuComplex nat(0.0,-10.0); cuComplex rhus(1.0,0.0); cuComplex D(0.739085133215160641655312087674,0.0); /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ for(v=0;v<5;v++) { cue = (cue - uon*conj(aon-hilva(cue)))/(cue + aon*conj(uon*helva(cue))); cue = (cue + uon*conj(aon-hilva(cue)))/(cue -aon*conj(uon*helva(cue))); cue = (cue - uon*conj(aon-hilva(cue))); accume = accume + aon*conj(uon*helva(cue)); /*accume = accume + urigo(powc(cue * aon - conj(cue*uon),aon),uon*fixon);*/ } cue = accume; for(v=0;v<5;v++) { cue = (cue - uon*conj(aon-hilva(cue)))/(cue + aon*conj(uon*helva(cue))); cue = (cue + uon*conj(aon-hilva(cue)))/(cue -aon*conj(uon*helva(cue))); cue = (cue - uon*conj(aon-hilva(cue))); accume = accume + aon*conj(uon*helva(cue)); /*accume = accume + urigo(powc(cue * aon - conj(cue*uon),aon),uon*fixon);*/ } accume = uon*the3(q,flat(accume)*fixon*aon)/powc(conj(the3(q,flat(accume)*faxon*uon)),uon); cue = hilva(accume); double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned 
char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */
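One robustness note that applies to distanceKernel in both versions of this file: the image-bounds check exists only inside a comment block ("if ((c >= w) || (r >= h)) return;"), so threads in the partial edge blocks launched by kernelLauncher write past the end of d_out whenever w or h is not a multiple of TX/TY; if the launcher is called with w = h = DIM (2100, which is not a multiple of 32), that situation does occur. A guard of roughly the shape below, placed before the first write to d_out, would be the usual fix; it is an editorial suggestion, not something present in either original file.

    // hypothetical bounds guard (only present inside a comment in the original kernel)
    if ((c >= w) || (r >= h))
        return;   // skip threads whose (c, r) falls outside the image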
5a1f60f7dbbf9d46423df63eae70dad5a9108c14.hip
// !!! This is a file automatically generated by hipify!!! /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "internal_shared.hpp" #include "opencv2/gpu/device/transform.hpp" #include "opencv2/gpu/device/color.hpp" #include "cvt_color_internal.h" namespace cv { namespace gpu { namespace device { OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_rgba_traits<uchar>::functor_type) { enum { smart_block_dim_x = 8 }; enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_bgr555_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_bgr555_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_bgr565_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_bgr565_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgr555_to_bgra_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgr555_to_rgba_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgr565_to_bgra_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgr565_to_rgba_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgra_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgr555_traits::functor_type) { enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgr565_traits::functor_type) { enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_yuv4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_yuv4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(yuv4_to_bgra_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(yuv4_to_rgba_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_YCrCb4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_YCrCb4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(YCrCb4_to_bgra_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(YCrCb4_to_rgba_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_xyz4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_xyz4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(xyz4_to_bgra_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(xyz4_to_rgba_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; 
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_hsv4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_hsv4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(hsv4_to_bgra_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(hsv4_to_rgba_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_hls4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_hls4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(hls4_to_bgra_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(hls4_to_rgba_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; #define OPENCV_GPU_IMPLEMENT_CVTCOLOR(name, traits) \ void name(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream) \ { \ traits::functor_type functor = traits::create_functor(); \ typedef typename traits::functor_type::argument_type src_t; \ typedef typename traits::functor_type::result_type dst_t; \ cv::gpu::device::transform((PtrStepSz<src_t>)src, (PtrStepSz<dst_t>)dst, functor, WithOutMask(), stream); \ } #define OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(name) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name, name ## _traits) #define OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(name) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _16u, name ## _traits<ushort>) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>) #define OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(name) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>) #define OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(name) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _full_8u, name ## _full_traits<uchar>) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _full_32f, name ## _full_traits<float>) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr_to_bgr555) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr_to_bgr565) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgb_to_bgr555) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgb_to_bgr565) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgra_to_bgr555) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgra_to_bgr565) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgba_to_bgr555) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgba_to_bgr565) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_bgra) 
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(gray_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(gray_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(gray_to_bgr555) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(gray_to_bgr565) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_gray) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_gray) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_gray) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_gray) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_gray) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_gray) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_yuv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_yuv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_yuv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_yuv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_yuv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_yuv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_yuv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_yuv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_YCrCb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_YCrCb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_YCrCb4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_YCrCb4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_YCrCb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_YCrCb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_YCrCb4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_YCrCb4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_xyz) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_xyz) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_xyz4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_xyz4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_xyz) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_xyz) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_xyz4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_xyz4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hsv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hsv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hsv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hsv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hsv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hsv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hsv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hsv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_bgra) 
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hls) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hls) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hls4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hls4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hls) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hls) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hls4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hls4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_lab) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_lab) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_lab4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_lab4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_lab) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_lab) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_lab4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_lab4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_lab) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_lab) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_lab4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_lab4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_lab) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_lab) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_lab4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_lab4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lrgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lrgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lrgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lrgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lbgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lbgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lbgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lbgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_luv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_luv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_luv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_luv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_luv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_luv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_luv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_luv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_luv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_luv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_luv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_luv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_luv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_luv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_luv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_luv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_rgba) 
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lrgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lrgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lrgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lrgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lbgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lbgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lbgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lbgra) #undef OPENCV_GPU_IMPLEMENT_CVTCOLOR #undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE #undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL #undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F #undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL }}} // namespace cv { namespace gpu { namespace device #endif /* CUDA_DISABLER */
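The long block of OPENCV_GPU_IMPLEMENT_CVTCOLOR_* invocations above is easier to audit once a single instantiation is written out. Expanding OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr_to_bgr555) by hand, using the macros defined earlier in this (hipified) file, should give, up to whitespace:

void bgr_to_bgr555(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream)
{
    bgr_to_bgr555_traits::functor_type functor = bgr_to_bgr555_traits::create_functor();
    typedef typename bgr_to_bgr555_traits::functor_type::argument_type src_t;
    typedef typename bgr_to_bgr555_traits::functor_type::result_type dst_t;
    cv::gpu::device::transform((PtrStepSz<src_t>)src, (PtrStepSz<dst_t>)dst, functor, WithOutMask(), stream);
}

so every colour-space pair gets an identically shaped host entry point that forwards to the templated transform kernel, with the per-conversion arithmetic supplied entirely by the traits functor.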
5a1f60f7dbbf9d46423df63eae70dad5a9108c14.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "internal_shared.hpp" #include "opencv2/gpu/device/transform.hpp" #include "opencv2/gpu/device/color.hpp" #include "cvt_color_internal.h" namespace cv { namespace gpu { namespace device { OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_rgba_traits<uchar>::functor_type) { enum { smart_block_dim_x = 8 }; enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_bgr555_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_bgr555_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_bgr565_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_bgr565_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgr555_to_bgra_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgr555_to_rgba_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgr565_to_bgra_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgr565_to_rgba_traits::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgra_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgr555_traits::functor_type) { enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgr565_traits::functor_type) { enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_yuv4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_yuv4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(yuv4_to_bgra_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(yuv4_to_rgba_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_YCrCb4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_YCrCb4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(YCrCb4_to_bgra_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(YCrCb4_to_rgba_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_xyz4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_xyz4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(xyz4_to_bgra_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(xyz4_to_rgba_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; 
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_hsv4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_hsv4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(hsv4_to_bgra_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(hsv4_to_rgba_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_hls4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_hls4_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(hls4_to_bgra_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(hls4_to_rgba_traits<uchar>::functor_type) { enum { smart_block_dim_y = 8 }; enum { smart_shift = 4 }; }; #define OPENCV_GPU_IMPLEMENT_CVTCOLOR(name, traits) \ void name(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream) \ { \ traits::functor_type functor = traits::create_functor(); \ typedef typename traits::functor_type::argument_type src_t; \ typedef typename traits::functor_type::result_type dst_t; \ cv::gpu::device::transform((PtrStepSz<src_t>)src, (PtrStepSz<dst_t>)dst, functor, WithOutMask(), stream); \ } #define OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(name) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name, name ## _traits) #define OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(name) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _16u, name ## _traits<ushort>) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>) #define OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(name) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>) #define OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(name) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _full_8u, name ## _full_traits<uchar>) \ OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _full_32f, name ## _full_traits<float>) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr_to_bgr555) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr_to_bgr565) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgb_to_bgr555) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgb_to_bgr565) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgra_to_bgr555) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgra_to_bgr565) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgba_to_bgr555) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgba_to_bgr565) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_bgra) 
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(gray_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(gray_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(gray_to_bgr555) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(gray_to_bgr565) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_gray) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_gray) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_gray) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_gray) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_gray) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_gray) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_yuv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_yuv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_yuv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_yuv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_yuv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_yuv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_yuv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_yuv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_YCrCb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_YCrCb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_YCrCb4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_YCrCb4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_YCrCb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_YCrCb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_YCrCb4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_YCrCb4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_xyz) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_xyz) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_xyz4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_xyz4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_xyz) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_xyz) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_xyz4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_xyz4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hsv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hsv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hsv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hsv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hsv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hsv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hsv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hsv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_bgra) 
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hls) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hls) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hls4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hls4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hls) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hls) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hls4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hls4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_lab) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_lab) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_lab4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_lab4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_lab) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_lab) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_lab4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_lab4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_lab) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_lab) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_lab4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_lab4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_lab) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_lab) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_lab4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_lab4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lrgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lrgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lrgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lrgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lbgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lbgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lbgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lbgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_luv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_luv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_luv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_luv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_luv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_luv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_luv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_luv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_luv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_luv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_luv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_luv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_luv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_luv) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_luv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_luv4) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_rgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_rgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_rgba) 
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_bgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_bgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lrgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lrgb) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lrgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lrgba) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lbgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lbgr) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lbgra) OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lbgra) #undef OPENCV_GPU_IMPLEMENT_CVTCOLOR #undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE #undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL #undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F #undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL }}} // namespace cv { namespace gpu { namespace device #endif /* CUDA_DISABLER */
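For reference, every name in the long invocation lists above becomes a thin host-side wrapper generated by the OPENCV_GPU_IMPLEMENT_CVTCOLOR macro defined earlier in this file; expanding one entry by hand makes the pattern easier to see. The sketch below shows roughly what OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr_to_bgr555) produces (names taken straight from the macro; the `typename` keyword the macro inserts is dropped here for readability):

// Hand-expanded form of OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr_to_bgr555):
// one exported function per conversion, all delegating to the generic
// cv::gpu::device::transform with the conversion functor from the traits class.
void bgr_to_bgr555(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream)
{
    bgr_to_bgr555_traits::functor_type functor = bgr_to_bgr555_traits::create_functor();
    typedef bgr_to_bgr555_traits::functor_type::argument_type src_t;
    typedef bgr_to_bgr555_traits::functor_type::result_type dst_t;
    cv::gpu::device::transform((PtrStepSz<src_t>)src, (PtrStepSz<dst_t>)dst, functor, WithOutMask(), stream);
}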
47098a8d09a6edfe77c71b6cd86f3b232f031e3e.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018, NVIDIA CORPORATION. * * Copyright 2019 BlazingDB, Inc. * Copyright 2019 Eyal Rozenberg <[email protected]> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <tests/utilities/legacy/cudf_test_utils.cuh> #include <nvstrings/NVCategory.h> #include <nvstrings/NVStrings.h> #include <cudf/utilities/legacy/type_dispatcher.hpp> #include <tests/utilities/legacy/nvcategory_utils.cuh> #include <cudf/legacy/functions.h> namespace { static constexpr char null_signifier = '@'; namespace detail { // When streaming char-like types, the standard library streams tend to treat // them as characters rather than numbers, e.g. you would get an 'a' instead of 97. // The following function(s) ensure we "promote" such values to integers before // they're streamed template <typename T> const T& promote_for_streaming(const T& x) { return x; } //int promote_for_streaming(const char& x) { return x; } //int promote_for_streaming(const unsigned char& x) { return x; } int promote_for_streaming(const signed char& x) { return x; } } // namespace detail struct column_printer { template<typename Element> void operator()(gdf_column const* the_column, unsigned min_printing_width, std::ostream& stream) { cudf::size_type num_rows { the_column->size }; Element const* column_data { static_cast<Element const*>(the_column->data) }; std::vector<Element> host_side_data(num_rows); hipMemcpy(host_side_data.data(), column_data, num_rows * sizeof(Element), hipMemcpyDeviceToHost); cudf::size_type const num_masks { gdf_valid_allocation_size(num_rows) }; std::vector<cudf::valid_type> h_mask(num_masks, ~cudf::valid_type { 0 }); if (nullptr != the_column->valid) { hipMemcpy(h_mask.data(), the_column->valid, num_masks * sizeof(cudf::valid_type), hipMemcpyDeviceToHost); } for (cudf::size_type i = 0; i < num_rows; ++i) { stream << std::setw(min_printing_width); if (gdf_is_valid(h_mask.data(), i)) { stream << detail::promote_for_streaming(host_side_data[i]); } else { stream << null_representative; } stream << ' '; } stream << std::endl; if(the_column->dtype == GDF_STRING_CATEGORY){ stream<<"Category Data (index | key):\n"; if(the_column->dtype_info.category != nullptr){ NVCategory *category = static_cast<NVCategory *>(the_column->dtype_info.category); size_t keys_size = category->keys_size(); NVStrings *keys = category->get_keys(); if (keys_size>0) { char ** data = new char *[keys_size]; int * byte_sizes = new int[keys_size]; keys->byte_count(byte_sizes, false); for(size_t i=0; i<keys_size; i++){ data[i]=new char[::max(2, byte_sizes[i])]; } keys->to_host(data, 0, keys_size); for(size_t i=0; i<keys_size; i++){ // null terminate strings // TODO: nvstrings overwrites data[i] ifit is a null string // Update this based on resolution of https://github.com/rapidsai/custrings/issues/330 if (byte_sizes[i]!=-1) data[i][byte_sizes[i]]=0; } for(size_t i=0; i<keys_size; i++){ // print category strings stream << "(" << i << "|"; if (data[i] == nullptr) stream << null_signifier; // 
account for null else stream << data[i]; stream << ")\t"; } stream<<std::endl; for(size_t i=0; i<keys_size; i++){ delete data[i]; } delete [] data; delete [] byte_sizes; } } } } }; /**---------------------------------------------------------------------------* * @brief Functor for comparing whether two elements from two gdf_columns are * equal. * *---------------------------------------------------------------------------**/ template <typename T> struct elements_equal { gdf_column lhs_col; gdf_column rhs_col; bool nulls_are_equivalent; using bit_mask_t = bit_mask::bit_mask_t; /**---------------------------------------------------------------------------* * @brief Constructs functor for comparing elements between two gdf_column's * * @param lhs The left column for comparison * @param rhs The right column for comparison * @param nulls_are_equal Desired behavior for whether or not nulls are * treated as equal to other nulls. Defaults to true. *---------------------------------------------------------------------------**/ __host__ __device__ elements_equal(gdf_column lhs, gdf_column rhs, bool nulls_are_equal = true) : lhs_col{lhs}, rhs_col{rhs}, nulls_are_equivalent{nulls_are_equal} {} __device__ bool operator()(cudf::size_type row) { bool const lhs_is_valid{gdf_is_valid(lhs_col.valid, row)}; bool const rhs_is_valid{gdf_is_valid(rhs_col.valid, row)}; if (lhs_is_valid and rhs_is_valid) { return static_cast<T const*>(lhs_col.data)[row] == static_cast<T const*>(rhs_col.data)[row]; } // If one value is valid but the other is not if (lhs_is_valid != rhs_is_valid) { return false; } return nulls_are_equivalent; } }; } // namespace anonymous /** * ---------------------------------------------------------------------------* * @brief Compare two gdf_columns on all fields, including pairwise comparison * of data and valid arrays * * @tparam T The type of columns to compare * @param left The left column * @param right The right column * @return bool Whether or not the columns are equal * ---------------------------------------------------------------------------**/ template <typename T> bool gdf_equal_columns(gdf_column const& left, gdf_column const& right) { if (left.size != right.size) return false; if (left.dtype != right.dtype) return false; if (left.null_count != right.null_count) return false; if (left.dtype_info.time_unit != right.dtype_info.time_unit) return false; if ((left.col_name == nullptr) != (right.col_name == nullptr)) return false; // if one is null but not both if (left.col_name != nullptr && std::strcmp(left.col_name, right.col_name) != 0) return false; if ((left.data == nullptr) != (right.data == nullptr)) return false; // if one is null but not both if ((left.valid == nullptr) != (right.valid == nullptr)) return false; // if one is null but not both if (left.data == nullptr) return true; // logically, both are null if (left.dtype == GDF_STRING_CATEGORY) { // Transfer input column to host std::vector<std::string> left_data, right_data; std::vector<cudf::valid_type> left_bitmask, right_bitmask; std::tie(left_data, left_bitmask) = cudf::test::nvcategory_column_to_host(const_cast<gdf_column*>(&left)); std::tie(right_data, right_bitmask) = cudf::test::nvcategory_column_to_host(const_cast<gdf_column*>(&right)); CHECK_CUDA(0); if (left_data.size() != right_data.size()) return false; for (size_t i = 0; i < left_data.size(); i++) { bool const left_is_valid{gdf_is_valid(left_bitmask.data(), i)}; bool const right_is_valid{gdf_is_valid(right_bitmask.data(), i)}; if (left_is_valid != 
right_is_valid) return false; else if (left_is_valid && (left_data[i] != right_data[i])) return false; } return true; } else { if ((left.dtype_info.category != nullptr) || (right.dtype_info.category != nullptr)) return false; // category must be nullptr bool equal_data = thrust::all_of(rmm::exec_policy()->on(0), thrust::make_counting_iterator(0), thrust::make_counting_iterator(left.size), elements_equal<T>{left, right}); CHECK_CUDA(0); return equal_data; } } namespace { struct columns_equal { template <typename T> bool operator()(gdf_column const& left, gdf_column const& right) { return gdf_equal_columns<T>(left, right); } }; }; // namespace anonymous // Type-erased version of gdf_equal_columns bool gdf_equal_columns(gdf_column const& left, gdf_column const& right) { return cudf::type_dispatcher(left.dtype, columns_equal{}, left, right); } void print_gdf_column(gdf_column const * the_column, unsigned min_printing_width, std::ostream& stream) { cudf::type_dispatcher(the_column->dtype, column_printer{}, the_column, min_printing_width, stream); } void print_valid_data(const cudf::valid_type *validity_mask, const size_t num_rows, std::ostream& stream) { hipError_t error; hipPointerAttribute_t attrib; hipPointerGetAttributes(&attrib, validity_mask); error = hipGetLastError(); std::vector<cudf::valid_type> h_mask(gdf_valid_allocation_size(num_rows)); if (error != hipErrorInvalidValue && isDeviceType(attrib)) hipMemcpy(h_mask.data(), validity_mask, gdf_valid_allocation_size(num_rows), hipMemcpyDeviceToHost); else memcpy(h_mask.data(), validity_mask, gdf_valid_allocation_size(num_rows)); std::transform( h_mask.begin(), h_mask.begin() + gdf_num_bitmask_elements(num_rows), std::ostream_iterator<std::string>(stream, " "), [](cudf::valid_type x) { auto bits = std::bitset<GDF_VALID_BITSIZE>(x).to_string(null_signifier); return std::string(bits.rbegin(), bits.rend()); }); stream << std::endl; } cudf::size_type count_valid_bits_host( std::vector<cudf::valid_type> const& masks, cudf::size_type const num_rows) { if ((0 == num_rows) || (0 == masks.size())) { return 0; } cudf::size_type count{0}; // Count the valid bits for all masks except the last one for (cudf::size_type i = 0; i < (gdf_num_bitmask_elements(num_rows) - 1); ++i) { cudf::valid_type current_mask = masks[i]; while (current_mask > 0) { current_mask &= (current_mask - 1); count++; } } // Only count the bits in the last mask that correspond to rows int num_rows_last_mask = num_rows % GDF_VALID_BITSIZE; if (num_rows_last_mask == 0) { num_rows_last_mask = GDF_VALID_BITSIZE; } // Mask off only the bits that correspond to rows cudf::valid_type const rows_mask = ( cudf::valid_type{1} << num_rows_last_mask ) - 1; cudf::valid_type last_mask = masks[gdf_num_bitmask_elements(num_rows) - 1] & rows_mask; while (last_mask > 0) { last_mask &= (last_mask - 1); count++; } return count; }
47098a8d09a6edfe77c71b6cd86f3b232f031e3e.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. * * Copyright 2019 BlazingDB, Inc. * Copyright 2019 Eyal Rozenberg <[email protected]> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <tests/utilities/legacy/cudf_test_utils.cuh> #include <nvstrings/NVCategory.h> #include <nvstrings/NVStrings.h> #include <cudf/utilities/legacy/type_dispatcher.hpp> #include <tests/utilities/legacy/nvcategory_utils.cuh> #include <cudf/legacy/functions.h> namespace { static constexpr char null_signifier = '@'; namespace detail { // When streaming char-like types, the standard library streams tend to treat // them as characters rather than numbers, e.g. you would get an 'a' instead of 97. // The following function(s) ensure we "promote" such values to integers before // they're streamed template <typename T> const T& promote_for_streaming(const T& x) { return x; } //int promote_for_streaming(const char& x) { return x; } //int promote_for_streaming(const unsigned char& x) { return x; } int promote_for_streaming(const signed char& x) { return x; } } // namespace detail struct column_printer { template<typename Element> void operator()(gdf_column const* the_column, unsigned min_printing_width, std::ostream& stream) { cudf::size_type num_rows { the_column->size }; Element const* column_data { static_cast<Element const*>(the_column->data) }; std::vector<Element> host_side_data(num_rows); cudaMemcpy(host_side_data.data(), column_data, num_rows * sizeof(Element), cudaMemcpyDeviceToHost); cudf::size_type const num_masks { gdf_valid_allocation_size(num_rows) }; std::vector<cudf::valid_type> h_mask(num_masks, ~cudf::valid_type { 0 }); if (nullptr != the_column->valid) { cudaMemcpy(h_mask.data(), the_column->valid, num_masks * sizeof(cudf::valid_type), cudaMemcpyDeviceToHost); } for (cudf::size_type i = 0; i < num_rows; ++i) { stream << std::setw(min_printing_width); if (gdf_is_valid(h_mask.data(), i)) { stream << detail::promote_for_streaming(host_side_data[i]); } else { stream << null_representative; } stream << ' '; } stream << std::endl; if(the_column->dtype == GDF_STRING_CATEGORY){ stream<<"Category Data (index | key):\n"; if(the_column->dtype_info.category != nullptr){ NVCategory *category = static_cast<NVCategory *>(the_column->dtype_info.category); size_t keys_size = category->keys_size(); NVStrings *keys = category->get_keys(); if (keys_size>0) { char ** data = new char *[keys_size]; int * byte_sizes = new int[keys_size]; keys->byte_count(byte_sizes, false); for(size_t i=0; i<keys_size; i++){ data[i]=new char[std::max(2, byte_sizes[i])]; } keys->to_host(data, 0, keys_size); for(size_t i=0; i<keys_size; i++){ // null terminate strings // TODO: nvstrings overwrites data[i] ifit is a null string // Update this based on resolution of https://github.com/rapidsai/custrings/issues/330 if (byte_sizes[i]!=-1) data[i][byte_sizes[i]]=0; } for(size_t i=0; i<keys_size; i++){ // print category strings stream << "(" << i << "|"; if (data[i] == nullptr) stream << null_signifier; // account for null else stream << data[i]; stream << 
")\t"; } stream<<std::endl; for(size_t i=0; i<keys_size; i++){ delete data[i]; } delete [] data; delete [] byte_sizes; } } } } }; /**---------------------------------------------------------------------------* * @brief Functor for comparing whether two elements from two gdf_columns are * equal. * *---------------------------------------------------------------------------**/ template <typename T> struct elements_equal { gdf_column lhs_col; gdf_column rhs_col; bool nulls_are_equivalent; using bit_mask_t = bit_mask::bit_mask_t; /**---------------------------------------------------------------------------* * @brief Constructs functor for comparing elements between two gdf_column's * * @param lhs The left column for comparison * @param rhs The right column for comparison * @param nulls_are_equal Desired behavior for whether or not nulls are * treated as equal to other nulls. Defaults to true. *---------------------------------------------------------------------------**/ __host__ __device__ elements_equal(gdf_column lhs, gdf_column rhs, bool nulls_are_equal = true) : lhs_col{lhs}, rhs_col{rhs}, nulls_are_equivalent{nulls_are_equal} {} __device__ bool operator()(cudf::size_type row) { bool const lhs_is_valid{gdf_is_valid(lhs_col.valid, row)}; bool const rhs_is_valid{gdf_is_valid(rhs_col.valid, row)}; if (lhs_is_valid and rhs_is_valid) { return static_cast<T const*>(lhs_col.data)[row] == static_cast<T const*>(rhs_col.data)[row]; } // If one value is valid but the other is not if (lhs_is_valid != rhs_is_valid) { return false; } return nulls_are_equivalent; } }; } // namespace anonymous /** * ---------------------------------------------------------------------------* * @brief Compare two gdf_columns on all fields, including pairwise comparison * of data and valid arrays * * @tparam T The type of columns to compare * @param left The left column * @param right The right column * @return bool Whether or not the columns are equal * ---------------------------------------------------------------------------**/ template <typename T> bool gdf_equal_columns(gdf_column const& left, gdf_column const& right) { if (left.size != right.size) return false; if (left.dtype != right.dtype) return false; if (left.null_count != right.null_count) return false; if (left.dtype_info.time_unit != right.dtype_info.time_unit) return false; if ((left.col_name == nullptr) != (right.col_name == nullptr)) return false; // if one is null but not both if (left.col_name != nullptr && std::strcmp(left.col_name, right.col_name) != 0) return false; if ((left.data == nullptr) != (right.data == nullptr)) return false; // if one is null but not both if ((left.valid == nullptr) != (right.valid == nullptr)) return false; // if one is null but not both if (left.data == nullptr) return true; // logically, both are null if (left.dtype == GDF_STRING_CATEGORY) { // Transfer input column to host std::vector<std::string> left_data, right_data; std::vector<cudf::valid_type> left_bitmask, right_bitmask; std::tie(left_data, left_bitmask) = cudf::test::nvcategory_column_to_host(const_cast<gdf_column*>(&left)); std::tie(right_data, right_bitmask) = cudf::test::nvcategory_column_to_host(const_cast<gdf_column*>(&right)); CHECK_CUDA(0); if (left_data.size() != right_data.size()) return false; for (size_t i = 0; i < left_data.size(); i++) { bool const left_is_valid{gdf_is_valid(left_bitmask.data(), i)}; bool const right_is_valid{gdf_is_valid(right_bitmask.data(), i)}; if (left_is_valid != right_is_valid) return false; else if (left_is_valid && 
(left_data[i] != right_data[i])) return false; } return true; } else { if ((left.dtype_info.category != nullptr) || (right.dtype_info.category != nullptr)) return false; // category must be nullptr bool equal_data = thrust::all_of(rmm::exec_policy()->on(0), thrust::make_counting_iterator(0), thrust::make_counting_iterator(left.size), elements_equal<T>{left, right}); CHECK_CUDA(0); return equal_data; } } namespace { struct columns_equal { template <typename T> bool operator()(gdf_column const& left, gdf_column const& right) { return gdf_equal_columns<T>(left, right); } }; }; // namespace anonymous // Type-erased version of gdf_equal_columns bool gdf_equal_columns(gdf_column const& left, gdf_column const& right) { return cudf::type_dispatcher(left.dtype, columns_equal{}, left, right); } void print_gdf_column(gdf_column const * the_column, unsigned min_printing_width, std::ostream& stream) { cudf::type_dispatcher(the_column->dtype, column_printer{}, the_column, min_printing_width, stream); } void print_valid_data(const cudf::valid_type *validity_mask, const size_t num_rows, std::ostream& stream) { cudaError_t error; cudaPointerAttributes attrib; cudaPointerGetAttributes(&attrib, validity_mask); error = cudaGetLastError(); std::vector<cudf::valid_type> h_mask(gdf_valid_allocation_size(num_rows)); if (error != cudaErrorInvalidValue && isDeviceType(attrib)) cudaMemcpy(h_mask.data(), validity_mask, gdf_valid_allocation_size(num_rows), cudaMemcpyDeviceToHost); else memcpy(h_mask.data(), validity_mask, gdf_valid_allocation_size(num_rows)); std::transform( h_mask.begin(), h_mask.begin() + gdf_num_bitmask_elements(num_rows), std::ostream_iterator<std::string>(stream, " "), [](cudf::valid_type x) { auto bits = std::bitset<GDF_VALID_BITSIZE>(x).to_string(null_signifier); return std::string(bits.rbegin(), bits.rend()); }); stream << std::endl; } cudf::size_type count_valid_bits_host( std::vector<cudf::valid_type> const& masks, cudf::size_type const num_rows) { if ((0 == num_rows) || (0 == masks.size())) { return 0; } cudf::size_type count{0}; // Count the valid bits for all masks except the last one for (cudf::size_type i = 0; i < (gdf_num_bitmask_elements(num_rows) - 1); ++i) { cudf::valid_type current_mask = masks[i]; while (current_mask > 0) { current_mask &= (current_mask - 1); count++; } } // Only count the bits in the last mask that correspond to rows int num_rows_last_mask = num_rows % GDF_VALID_BITSIZE; if (num_rows_last_mask == 0) { num_rows_last_mask = GDF_VALID_BITSIZE; } // Mask off only the bits that correspond to rows cudf::valid_type const rows_mask = ( cudf::valid_type{1} << num_rows_last_mask ) - 1; cudf::valid_type last_mask = masks[gdf_num_bitmask_elements(num_rows) - 1] & rows_mask; while (last_mask > 0) { last_mask &= (last_mask - 1); count++; } return count; }
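A side note on count_valid_bits_host above: the inner `while (mask > 0) { mask &= (mask - 1); count++; }` loop is the classic clear-lowest-set-bit population count, so it iterates once per set bit rather than once per bit position, and the final word is ANDed with rows_mask so that padding bits past num_rows are not counted. A minimal standalone illustration of the same trick (the function name here is made up for the example, it is not part of the test utilities):

#include <cstdint>
#include <cstdio>

// Counts set bits by repeatedly clearing the lowest one: x &= (x - 1)
// removes exactly one set bit per iteration.
static int popcount_clear_lowest(std::uint8_t x)
{
    int count = 0;
    while (x > 0)
    {
        x &= (x - 1);
        ++count;
    }
    return count;
}

int main()
{
    std::printf("%d\n", popcount_clear_lowest(0xB4)); // 0xB4 = 10110100b -> prints 4
    return 0;
}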
1692984aca350f9ac49f03ed930e5b73f7ba265a.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include "err.h" void PrintLastError(const char *file, int line) { hipError_t err=hipGetLastError(); if(err!=hipSuccess) fprintf(stderr, "%s in %s at line %d\n", hipGetErrorString(err), file, line); } bool CheckError(hipError_t err, const char *file, int line) { if(err==hipSuccess) return false; fprintf(stderr, "%s in %s at line %d\n", hipGetErrorString(err), file, line); return true; } void deviceDiagnostics(){ int deviceCount; CHECK_ERROR( hipGetDeviceCount(&deviceCount) ); printf("GPU devices :: %d \n", deviceCount); hipDeviceProp_t devProp[deviceCount]; for(int i = 0; i < deviceCount; ++i) { printf("*** CUDA Device #%d ***", i); CHECK_ERROR( hipGetDeviceProperties(&devProp[i], i) ); printf("%s ***\n", devProp[i].name); printf("\t%d.%d compute capability\n", devProp[i].major, devProp[i].minor); printf("\t%d multiprocessors\n", devProp[i].multiProcessorCount); printf("\t%.2fGB max mem pitch of %.2fGB global memory\n", devProp[i].memPitch/(1024.*1024.*1024), devProp[i].totalGlobalMem/(1024.*1024.*1024)); printf("\t%.2fKB total shared memory per block\n", devProp[i].sharedMemPerBlock/1024.); printf("\t%.2fKB total constant memory\n", devProp[i].totalConstMem/1024.); printf("\t%.2fK registers per block\n", devProp[i].regsPerBlock/1024.); printf("\t%d/%d threads per Warp/block\n", devProp[i].warpSize, devProp[i].maxThreadsPerBlock); printf("\tClock rate: %.2fGHz\n", devProp[i].clockRate*1e-6); printf("\tTexture alignment: %luB\n", devProp[i].textureAlignment); printf("\tConcurrent copy and execution: %s\n", (devProp[i].deviceOverlap ? "Yes" : "No")); printf("\tKernel execution timeout: %s\n", (devProp[i].kernelExecTimeoutEnabled ? "Yes" : "No")); } } #include "im3D.hpp" int im3D_pars4save::init_from_command_line(char** argv) { if(strncmp(*argv,"--help",6)==0) return -1; if(strncmp(*argv,"--devQ",6)==0) { deviceDiagnostics(); return 1; } if(strcmp(*argv,"--box")==0) read_float3(BoxFactor, argv[1]); else if(strcmp(*argv,"--load")==0) load_from_file(argv[1]); else if(strcmp(*argv,"--mesh")==0) read_float3(MeshBox, argv[1]); else if(strcmp(*argv,"--sh_mesh")==0) read_float3(MeshShift, argv[1]); else if(strcmp(*argv,"--Dmesh")==0) Dmesh=read_float(argv[1]); else if(strcmp(*argv,"--zoom")==0) read_float3(Dzoom, argv[1]); else if(strcmp(*argv,"--add")==0) read_float3(Dadd, argv[1]); else if(strcmp(*argv,"--shrink")==0) read_int3(Dshrink, argv[1]); else if(strcmp(*argv,"--Narr")==0) read_int3(Narr, argv[1]); else if(strcmp(*argv,"--step")==0) read_float3(step, argv[1]); else if(strcmp(*argv,"--base")==0) read_float3(base, argv[1]); else if(strcmp(*argv,"--bkgr_col")==0) read_float3(bkgr_col, argv[1]); else if(strcmp(*argv,"--mesh_col")==0) read_float3(mesh_col, argv[1]); else if(strcmp(*argv,"--box_col")==0) read_float3(box_col, argv[1]); else if(strcmp(*argv,"--rot_point")==0) read_float3(RotPoint, argv[1]); else if(strcmp(*argv,"--box_shrink")==0) read_float3(box_shrink, argv[1]); else if(strcmp(*argv,"--drop_dir")==0) strcpy(drop_dir,argv[1]); else if(strcmp(*argv,"--cntr")==0) cntr_levels[cntr_num++]=read_float(argv[1]); else if(strcmp(*argv,"--ld_sz")==0) read_int2(ld_sz, argv[1]); else if(strcmp(*argv,"--nocomp")==0) return 1; else if(strcmp(*argv,"--norun")==0) return 1; else if(strcmp(*argv,"--redefine")==0) return 2; else { printf("Illegal parameters' syntax notation\n<%s>", *argv); return 0; } //else if(strcmp(*argv,"--")==0) read_float3(, argv[1]); //printf("par: %s; vals: %s\n", 
argv[0], argv[1]); return 2; }
const char* im3D_pars4save::command_line_help_string() {
  return "[--devQ] [--load <opt-file>] [--zoom \"1. 1. 1.\"] [--shrink \"1 1 1\"] [--step \"1. 1. 1.\"] [--base \"1. 1. 1.\"] [--box \"1. 1. 1.\"] [--mesh \"200. 200. 200.\"] [--Dmesh 5.] [--drop_dir \".\"] [--bkgr_col \"0.1 0.1 0.1\"] [--mesh_col \"0.8 0.8 0.2\"] [--box_col \"1. 1. 1.\"] [--box_shrink \"1. 1. 1.\"] [--sensor \"1 1 1\"]";
}
void im3D_pars4save::print_command_line_help() {
  printf(" --devQ\tprints information about the video cards in the computer;\n");
  printf(" --load\tloads parameters from <opt-file>, previously saved with the <w/W> key\n");
  printf(" --zoom\tscale factor, affects the 2D mode and the window size, [1. 1. 1.];\n");
  printf(" --add \tadds space to the window size, needed to draw 3D on top of 2D, [0. 0. 0.];\n");
  printf(" --shrink\tscale factor, applied everywhere, reduces memory requirements, [1 1 1];\n");
  printf(" --Narr\texplicitly given array size (if =0, it is taken from the first file) [0 0 0];\n");
  printf(" --box \tcorrection of the box proportions in 3D mode, [1. 1. 1.];\n");
  printf(" --step \tsteps between points, affects only the tick marks, [1. 1. 1.];\n");
  printf(" --base \tsteps between points, affects only the tick marks, [0. 0. 0.];\n");
  printf(" --mesh\tspacing of the mesh lines in the box, in cell coordinates (before correction), [100. 100. 100.];\n");
  printf(" --sh_mesh\tshift of the mesh lines in the box, in cell coordinates (before correction), [0. 0. 0.];\n");
  printf(" --Dmesh\tmesh line width in pixels (looks somewhat narrower with antialiasing), [5.];\n");
  printf(" --drop_dir\tname of the directory where the various output files are saved, [.];\n");
  printf(" --bkgr_col\tbackground color, [0.1 0.1 0.1];\n");
  printf(" --mesh_col\tmesh line color, [0.8 0.8 0.2];\n");
  printf(" --box_col\tbox line color, [1.0 1.0 1.0];\n");
  printf(" --box_shrink\tstretch factor for the box dimensions, [1.0 1.0 1.0];\n");
  printf(" --rot_point\tpoint in the box about which rotation is performed, [0.5 0.5 0.5];\n");
  printf(" --sensor\tsensor coordinates, several sensors may be given;\n");
  printf(" --cntr\tcontour level value, several levels may be given;\n");
  printf(" --ld_sz\treads previously saved parameters in compatibility mode, [80 288];\n");
}
1692984aca350f9ac49f03ed930e5b73f7ba265a.cu
#include <cuda.h> #include <stdio.h> #include "err.h" void PrintLastError(const char *file, int line) { cudaError_t err=cudaGetLastError(); if(err!=cudaSuccess) fprintf(stderr, "%s in %s at line %d\n", cudaGetErrorString(err), file, line); } bool CheckError(cudaError_t err, const char *file, int line) { if(err==cudaSuccess) return false; fprintf(stderr, "%s in %s at line %d\n", cudaGetErrorString(err), file, line); return true; } void deviceDiagnostics(){ int deviceCount; CHECK_ERROR( cudaGetDeviceCount(&deviceCount) ); printf("GPU devices :: %d \n", deviceCount); cudaDeviceProp devProp[deviceCount]; for(int i = 0; i < deviceCount; ++i) { printf("*** CUDA Device #%d ***", i); CHECK_ERROR( cudaGetDeviceProperties(&devProp[i], i) ); printf("%s ***\n", devProp[i].name); printf("\t%d.%d compute capability\n", devProp[i].major, devProp[i].minor); printf("\t%d multiprocessors\n", devProp[i].multiProcessorCount); printf("\t%.2fGB max mem pitch of %.2fGB global memory\n", devProp[i].memPitch/(1024.*1024.*1024), devProp[i].totalGlobalMem/(1024.*1024.*1024)); printf("\t%.2fKB total shared memory per block\n", devProp[i].sharedMemPerBlock/1024.); printf("\t%.2fKB total constant memory\n", devProp[i].totalConstMem/1024.); printf("\t%.2fK registers per block\n", devProp[i].regsPerBlock/1024.); printf("\t%d/%d threads per Warp/block\n", devProp[i].warpSize, devProp[i].maxThreadsPerBlock); printf("\tClock rate: %.2fGHz\n", devProp[i].clockRate*1e-6); printf("\tTexture alignment: %luB\n", devProp[i].textureAlignment); printf("\tConcurrent copy and execution: %s\n", (devProp[i].deviceOverlap ? "Yes" : "No")); printf("\tKernel execution timeout: %s\n", (devProp[i].kernelExecTimeoutEnabled ? "Yes" : "No")); } } #include "im3D.hpp" int im3D_pars4save::init_from_command_line(char** argv) { if(strncmp(*argv,"--help",6)==0) return -1; if(strncmp(*argv,"--devQ",6)==0) { deviceDiagnostics(); return 1; } if(strcmp(*argv,"--box")==0) read_float3(BoxFactor, argv[1]); else if(strcmp(*argv,"--load")==0) load_from_file(argv[1]); else if(strcmp(*argv,"--mesh")==0) read_float3(MeshBox, argv[1]); else if(strcmp(*argv,"--sh_mesh")==0) read_float3(MeshShift, argv[1]); else if(strcmp(*argv,"--Dmesh")==0) Dmesh=read_float(argv[1]); else if(strcmp(*argv,"--zoom")==0) read_float3(Dzoom, argv[1]); else if(strcmp(*argv,"--add")==0) read_float3(Dadd, argv[1]); else if(strcmp(*argv,"--shrink")==0) read_int3(Dshrink, argv[1]); else if(strcmp(*argv,"--Narr")==0) read_int3(Narr, argv[1]); else if(strcmp(*argv,"--step")==0) read_float3(step, argv[1]); else if(strcmp(*argv,"--base")==0) read_float3(base, argv[1]); else if(strcmp(*argv,"--bkgr_col")==0) read_float3(bkgr_col, argv[1]); else if(strcmp(*argv,"--mesh_col")==0) read_float3(mesh_col, argv[1]); else if(strcmp(*argv,"--box_col")==0) read_float3(box_col, argv[1]); else if(strcmp(*argv,"--rot_point")==0) read_float3(RotPoint, argv[1]); else if(strcmp(*argv,"--box_shrink")==0) read_float3(box_shrink, argv[1]); else if(strcmp(*argv,"--drop_dir")==0) strcpy(drop_dir,argv[1]); else if(strcmp(*argv,"--cntr")==0) cntr_levels[cntr_num++]=read_float(argv[1]); else if(strcmp(*argv,"--ld_sz")==0) read_int2(ld_sz, argv[1]); else if(strcmp(*argv,"--nocomp")==0) return 1; else if(strcmp(*argv,"--norun")==0) return 1; else if(strcmp(*argv,"--redefine")==0) return 2; else { printf("Illegal parameters' syntax notation\n<%s>", *argv); return 0; } //else if(strcmp(*argv,"--")==0) read_float3(, argv[1]); //printf("par: %s; vals: %s\n", argv[0], argv[1]); return 2; } const char* 
im3D_pars4save::command_line_help_string() {
  return "[--devQ] [--load <opt-file>] [--zoom \"1. 1. 1.\"] [--shrink \"1 1 1\"] [--step \"1. 1. 1.\"] [--base \"1. 1. 1.\"] [--box \"1. 1. 1.\"] [--mesh \"200. 200. 200.\"] [--Dmesh 5.] [--drop_dir \".\"] [--bkgr_col \"0.1 0.1 0.1\"] [--mesh_col \"0.8 0.8 0.2\"] [--box_col \"1. 1. 1.\"] [--box_shrink \"1. 1. 1.\"] [--sensor \"1 1 1\"]";
}
void im3D_pars4save::print_command_line_help() {
  printf(" --devQ\tprints information about the video cards in the computer;\n");
  printf(" --load\tloads parameters from <opt-file>, previously saved with the <w/W> key\n");
  printf(" --zoom\tscale factor, affects the 2D mode and the window size, [1. 1. 1.];\n");
  printf(" --add \tadds space to the window size, needed to draw 3D on top of 2D, [0. 0. 0.];\n");
  printf(" --shrink\tscale factor, applied everywhere, reduces memory requirements, [1 1 1];\n");
  printf(" --Narr\texplicitly given array size (if =0, it is taken from the first file) [0 0 0];\n");
  printf(" --box \tcorrection of the box proportions in 3D mode, [1. 1. 1.];\n");
  printf(" --step \tsteps between points, affects only the tick marks, [1. 1. 1.];\n");
  printf(" --base \tsteps between points, affects only the tick marks, [0. 0. 0.];\n");
  printf(" --mesh\tspacing of the mesh lines in the box, in cell coordinates (before correction), [100. 100. 100.];\n");
  printf(" --sh_mesh\tshift of the mesh lines in the box, in cell coordinates (before correction), [0. 0. 0.];\n");
  printf(" --Dmesh\tmesh line width in pixels (looks somewhat narrower with antialiasing), [5.];\n");
  printf(" --drop_dir\tname of the directory where the various output files are saved, [.];\n");
  printf(" --bkgr_col\tbackground color, [0.1 0.1 0.1];\n");
  printf(" --mesh_col\tmesh line color, [0.8 0.8 0.2];\n");
  printf(" --box_col\tbox line color, [1.0 1.0 1.0];\n");
  printf(" --box_shrink\tstretch factor for the box dimensions, [1.0 1.0 1.0];\n");
  printf(" --rot_point\tpoint in the box about which rotation is performed, [0.5 0.5 0.5];\n");
  printf(" --sensor\tsensor coordinates, several sensors may be given;\n");
  printf(" --cntr\tcontour level value, several levels may be given;\n");
  printf(" --ld_sz\treads previously saved parameters in compatibility mode, [80 288];\n");
}
5061d6572916317d85754fb5822b076a8bd4da5b.hip
// !!! This is a file automatically generated by hipify!!! /** * gemm.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <unistd.h> #include <stdio.h> #include <time.h> #include <sys/time.h> #include <stdlib.h> #include <stdarg.h> #include <string.h> #include <hip/hip_runtime.h> #include "../common/polybenchUtilFuncts.h" #include <hip/hip_runtime.h> #include "rocblas.h" #define IDX2C(i ,j , ld) ((( j )*( ld ))+( i )) #define GPU_DEVICE 0 //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 /* Problem size */ #define NI 4096 #define NJ 4096 #define NK 4096 /* Thread block dimensions */ #define DIM_THREAD_BLOCK_X 32 #define DIM_THREAD_BLOCK_Y 8 /* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0) */ #define ALPHA 32412.0f #define BETA 2123.0f /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void gemm(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C) { int i,j,k; for (i = 0; i < NI; i++) { for (j = 0; j < NJ; j++) { C[i*NJ + j] *= BETA; for (k = 0; k < NK; ++k) { C[i*NJ + j] += ALPHA * A[i*NK + k] * B[k*NJ + j]; } } } } void init(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C) { int i, j; for (i = 0; i < NI; i++) { for (j = 0; j < NK; j++) { A[i*NK + j] = ((DATA_TYPE) i*j) / NI; } } for (i = 0; i < NK; i++) { for (j = 0; j < NJ; j++) { B[i*NJ + j] = ((DATA_TYPE) i*j) / NJ; } } for (i = 0; i < NI; i++) { for (j = 0; j < NJ; j++) { C[i*NJ + j] = ((DATA_TYPE) i*j) / NJ; } } } void compareResults(DATA_TYPE* C, DATA_TYPE* C_outputFromGpu) { int i, j, fail; fail = 0; // Compare C1 and C2 for (i=0; i < NI; i++) { for (j=0; j < NJ; j++) { if (percentDiff(C[i*NJ + j], C_outputFromGpu[i*NJ + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, GPU_DEVICE); printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name); hipSetDevice( GPU_DEVICE ); } __global__ void gemm_kernel(DATA_TYPE *a, DATA_TYPE *b, DATA_TYPE *c) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < NI) && (j < NJ)) { c[i * NJ + j] *= BETA; int k; for(k=0; k < NK; k++) { c[i * NJ + j] += ALPHA * a[i * NK + k] * b[k * NJ +j]; } } } void gemmCuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* C_outputFromGpu) { double t_start, t_end; double t_start_k, t_end_k; hipblasStatus_t stat; hipblasHandle_t handle; DATA_TYPE *A_gpu; DATA_TYPE *B_gpu; DATA_TYPE *C_gpu; DATA_TYPE alpha = ALPHA; DATA_TYPE beta = BETA; stat = hipblasCreate(&handle); t_start = rtclock(); hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NK); hipMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NK * NJ); hipMalloc((void **)&C_gpu, sizeof(DATA_TYPE) * NI * NJ); hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NK, hipMemcpyHostToDevice); hipMemcpy(B_gpu, B, sizeof(DATA_TYPE) * NK * NJ, hipMemcpyHostToDevice); hipMemcpy(C_gpu, C, sizeof(DATA_TYPE) * NI * NJ, hipMemcpyHostToDevice); t_start_k = rtclock(); stat = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, NI, NJ, NK, &alpha, A_gpu, NK, B_gpu, NJ, &beta, C_gpu, NI); if(stat != HIPBLAS_STATUS_SUCCESS){ printf("Error in culbas sgemv 1, error code = %d\n", stat); 
return; } hipDeviceSynchronize(); t_end_k = rtclock(); hipMemcpy(C_outputFromGpu, C_gpu, sizeof(DATA_TYPE) * NI * NJ, hipMemcpyDeviceToHost); t_end = rtclock(); fprintf(stdout, "cBLAS kernel : %0.6lf\n", t_end_k - t_start_k); fprintf(stdout, "cBLAS copy + kernel : %0.6lf\n", t_end - t_start); hipblasDestroy(handle); hipFree(A_gpu); hipFree(B_gpu); hipFree(C_gpu); } int main(int argc, char *argv[]) { DATA_TYPE* A; DATA_TYPE* B; DATA_TYPE* C; DATA_TYPE* C_outputFromGpu; A = (DATA_TYPE*)malloc(NI*NK*sizeof(DATA_TYPE)); B = (DATA_TYPE*)malloc(NK*NJ*sizeof(DATA_TYPE)); C = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE)); C_outputFromGpu = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE)); init(A, B, C); GPU_argv_init(); gemmCuda(A, B, C, C_outputFromGpu); /* double t_start, t_end; t_start = rtclock(); gemm(A, B, C); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(C, C_outputFromGpu); */ free(A); free(B); free(C); free(C_outputFromGpu); return 0; }
5061d6572916317d85754fb5822b076a8bd4da5b.cu
/** * gemm.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <unistd.h> #include <stdio.h> #include <time.h> #include <sys/time.h> #include <stdlib.h> #include <stdarg.h> #include <string.h> #include <cuda.h> #include "../common/polybenchUtilFuncts.h" #include <cuda_runtime.h> #include "cublas_v2.h" #define IDX2C(i ,j , ld) ((( j )*( ld ))+( i )) #define GPU_DEVICE 0 //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 /* Problem size */ #define NI 4096 #define NJ 4096 #define NK 4096 /* Thread block dimensions */ #define DIM_THREAD_BLOCK_X 32 #define DIM_THREAD_BLOCK_Y 8 /* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0) */ #define ALPHA 32412.0f #define BETA 2123.0f /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void gemm(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C) { int i,j,k; for (i = 0; i < NI; i++) { for (j = 0; j < NJ; j++) { C[i*NJ + j] *= BETA; for (k = 0; k < NK; ++k) { C[i*NJ + j] += ALPHA * A[i*NK + k] * B[k*NJ + j]; } } } } void init(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C) { int i, j; for (i = 0; i < NI; i++) { for (j = 0; j < NK; j++) { A[i*NK + j] = ((DATA_TYPE) i*j) / NI; } } for (i = 0; i < NK; i++) { for (j = 0; j < NJ; j++) { B[i*NJ + j] = ((DATA_TYPE) i*j) / NJ; } } for (i = 0; i < NI; i++) { for (j = 0; j < NJ; j++) { C[i*NJ + j] = ((DATA_TYPE) i*j) / NJ; } } } void compareResults(DATA_TYPE* C, DATA_TYPE* C_outputFromGpu) { int i, j, fail; fail = 0; // Compare C1 and C2 for (i=0; i < NI; i++) { for (j=0; j < NJ; j++) { if (percentDiff(C[i*NJ + j], C_outputFromGpu[i*NJ + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, GPU_DEVICE); printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name); cudaSetDevice( GPU_DEVICE ); } __global__ void gemm_kernel(DATA_TYPE *a, DATA_TYPE *b, DATA_TYPE *c) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < NI) && (j < NJ)) { c[i * NJ + j] *= BETA; int k; for(k=0; k < NK; k++) { c[i * NJ + j] += ALPHA * a[i * NK + k] * b[k * NJ +j]; } } } void gemmCuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* C_outputFromGpu) { double t_start, t_end; double t_start_k, t_end_k; cublasStatus_t stat; cublasHandle_t handle; DATA_TYPE *A_gpu; DATA_TYPE *B_gpu; DATA_TYPE *C_gpu; DATA_TYPE alpha = ALPHA; DATA_TYPE beta = BETA; stat = cublasCreate(&handle); t_start = rtclock(); cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NK); cudaMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NK * NJ); cudaMalloc((void **)&C_gpu, sizeof(DATA_TYPE) * NI * NJ); cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NK, cudaMemcpyHostToDevice); cudaMemcpy(B_gpu, B, sizeof(DATA_TYPE) * NK * NJ, cudaMemcpyHostToDevice); cudaMemcpy(C_gpu, C, sizeof(DATA_TYPE) * NI * NJ, cudaMemcpyHostToDevice); t_start_k = rtclock(); stat = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, NI, NJ, NK, &alpha, A_gpu, NK, B_gpu, NJ, &beta, C_gpu, NI); if(stat != CUBLAS_STATUS_SUCCESS){ printf("Error in culbas sgemv 1, error code = %d\n", stat); return; } cudaDeviceSynchronize(); t_end_k = rtclock(); 
cudaMemcpy(C_outputFromGpu, C_gpu, sizeof(DATA_TYPE) * NI * NJ, cudaMemcpyDeviceToHost); t_end = rtclock(); fprintf(stdout, "cBLAS kernel : %0.6lf\n", t_end_k - t_start_k); fprintf(stdout, "cBLAS copy + kernel : %0.6lf\n", t_end - t_start); cublasDestroy(handle); cudaFree(A_gpu); cudaFree(B_gpu); cudaFree(C_gpu); } int main(int argc, char *argv[]) { DATA_TYPE* A; DATA_TYPE* B; DATA_TYPE* C; DATA_TYPE* C_outputFromGpu; A = (DATA_TYPE*)malloc(NI*NK*sizeof(DATA_TYPE)); B = (DATA_TYPE*)malloc(NK*NJ*sizeof(DATA_TYPE)); C = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE)); C_outputFromGpu = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE)); init(A, B, C); GPU_argv_init(); gemmCuda(A, B, C, C_outputFromGpu); /* double t_start, t_end; t_start = rtclock(); gemm(A, B, C); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(C, C_outputFromGpu); */ free(A); free(B); free(C); free(C_outputFromGpu); return 0; }
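One caveat worth flagging in the cuBLAS variant above: cublasSgemm assumes column-major storage, while init() fills A, B and C row-major and the (commented-out) CPU reference multiplies them row-major, so passing the row-major buffers straight through with lda = NK and ldc = NI does not in general reproduce the reference result (it only matches dimensions here because NI, NJ and NK are all equal, and the compareResults call is disabled, which hides the discrepancy). A common way to get the row-major product C = alpha*A*B + beta*C without transposing any data is to ask cuBLAS for the transposed problem by swapping the operands; a sketch, reusing the handle, buffers and sizes from the file above:

// Row-major GEMM via column-major cuBLAS: computing C^T = B^T * A^T is the same
// as computing row-major C = A * B, so swap A and B and swap the m/n dimensions.
// Leading dimensions are the row lengths of the row-major buffers.
stat = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                   NJ,            // m: columns of row-major C
                   NI,            // n: rows of row-major C
                   NK,            // k
                   &alpha,
                   B_gpu, NJ,     // B first, ld = row length of B
                   A_gpu, NK,     // then A, ld = row length of A
                   &beta,
                   C_gpu, NJ);    // ld = row length of C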
stdmean.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<cmath>
using namespace std;

//standard deviation = sqrt(summation((x - mean)^2) / n)
//Note: every block writes b[0], so launching more than one block races on it;
//the kernel is therefore launched below with a single block and a single thread.
__global__ void standard_deviation(int *a, float *b, float mean, int n)
{
    int tid = blockIdx.x;
    //int tid - threadIdx.x;
    b[0] = 0.0;
    for(int i = tid; i < n; i++)
    {
        b[0] += (a[i] - mean) * (a[i] - mean);
        //printf("b[%d] = %d, a[%d] = %d", i, b[0], i, a[i]);
    }
    b[0] = b[0]/n;
}

int main()
{
    int n;
    cin>>n;

    //int a[n]; //does not work on some cuda versions
    int *a = (int *)malloc(n * sizeof(int));

    cout<<"The input numbers are: "<<endl;
    for(int i = 0; i < n; i++)
    {
        a[i] = i+1;
        cout<<a[i]<<"\t";
    }
    cout<<endl;

    float mean = (n + 1)/2.0f;  // 2.0f avoids integer truncation for even n
    cout<<"Mean: "<<mean<<endl;

    int *dev_a;
    float *dev_b;
    hipMalloc(&dev_a, n * sizeof(int));
    hipMalloc(&dev_b, sizeof(float));
    hipMemcpy(dev_a, a, n * sizeof(int), hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( standard_deviation), dim3(1), dim3(1), 0, 0, dev_a, dev_b, mean, n);

    float *ans = (float *)malloc(sizeof(float));
    hipMemcpy(ans, dev_b, sizeof(float), hipMemcpyDeviceToHost);
    cout<<"The answer is: "<< sqrt(ans[0])<<endl;

    hipFree(dev_a);
    hipFree(dev_b);
    free(a);
    free(ans);
}
stdmean.cu
#include<iostream>
#include<cmath>
using namespace std;

//standard deviation = sqrt(summation((x - mean)^2) / n)
//Note: every block writes b[0], so launching more than one block races on it;
//the kernel is therefore launched below with a single block and a single thread.
__global__ void standard_deviation(int *a, float *b, float mean, int n)
{
    int tid = blockIdx.x;
    //int tid - threadIdx.x;
    b[0] = 0.0;
    for(int i = tid; i < n; i++)
    {
        b[0] += (a[i] - mean) * (a[i] - mean);
        //printf("b[%d] = %d, a[%d] = %d", i, b[0], i, a[i]);
    }
    b[0] = b[0]/n;
}

int main()
{
    int n;
    cin>>n;

    //int a[n]; //does not work on some cuda versions
    int *a = (int *)malloc(n * sizeof(int));

    cout<<"The input numbers are: "<<endl;
    for(int i = 0; i < n; i++)
    {
        a[i] = i+1;
        cout<<a[i]<<"\t";
    }
    cout<<endl;

    float mean = (n + 1)/2.0f;  // 2.0f avoids integer truncation for even n
    cout<<"Mean: "<<mean<<endl;

    int *dev_a;
    float *dev_b;
    cudaMalloc(&dev_a, n * sizeof(int));
    cudaMalloc(&dev_b, sizeof(float));
    cudaMemcpy(dev_a, a, n * sizeof(int), cudaMemcpyHostToDevice);

    standard_deviation<<<1, 1>>>(dev_a, dev_b, mean, n);

    float *ans = (float *)malloc(sizeof(float));
    cudaMemcpy(ans, dev_b, sizeof(float), cudaMemcpyDeviceToHost);
    cout<<"The answer is: "<< sqrt(ans[0])<<endl;

    cudaFree(dev_a);
    cudaFree(dev_b);
    free(a);
    free(ans);
}
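Both stdmean variants accumulate the sum of squared differences through a single memory location, which only works serially. A parallel version has to combine per-thread partial sums explicitly; below is a minimal sketch (CUDA spelling, block size fixed at 256, *sumsq zero-initialized before launch) using a shared-memory tree reduction plus one atomicAdd per block. It is illustrative, not part of either file above:

// Minimal parallel variance sketch: each block reduces its elements in shared
// memory, then one atomicAdd per block accumulates into *sumsq.
// Launch with blockDim.x == 256; *sumsq must be zeroed before the launch.
__global__ void sum_squared_diff(const int *a, float *sumsq, float mean, int n)
{
    __shared__ float partial[256];
    int tid = threadIdx.x;
    int idx = blockIdx.x * blockDim.x + threadIdx.x;

    // grid-stride loop so n may exceed the number of launched threads
    float local = 0.0f;
    for (int i = idx; i < n; i += gridDim.x * blockDim.x) {
        float d = a[i] - mean;
        local += d * d;
    }
    partial[tid] = local;
    __syncthreads();

    // tree reduction within the block
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (tid < stride) partial[tid] += partial[tid + stride];
        __syncthreads();
    }
    if (tid == 0) atomicAdd(sumsq, partial[0]);
}
// Host side: copy *sumsq back, then std = sqrtf(sumsq_host / n).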
a5904cf793d96949ff28df72df4b38a8fca968e2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "zero/Zero.cuh" __device__ int xorShift(int seed) { int updated = seed; updated ^= updated << 13; updated ^= updated >> 17; updated ^= updated << 5; return updated; } /* dropout probability is 1 - keep probability and should be less than 1. seed + 2147483648.0: [0, 2^32/2 + 2^32/2-1 = 4294967295] (seed + 2147483648.0) / 4294967295.0: [0 to 1] (seed + 2147483648.0) / 4294967295.0 - dropout probability): (0 to 1] ceilf(seed + 2147483648.0) / 4294967295.0 - dropout probability): or or 1 */ __device__ float generateMask(float seed, float dropoutProbability) { return ceilf((seed + 2147483648.0) / 4294967295.0 - dropoutProbability); } extern "C" __global__ void dropoutTrainingKernel (int batchSize, int numberEntriesPerInstance, int numberIterations, float dropoutProbability, float* input, int* seeds, float* masks, float* result) { // What's the first entry index within the instance that this thread should operate on? int startIndexWithinInstance = blockIdx.y * (blockDim.x * numberIterations) + threadIdx.x * numberIterations; // Continue if this index is smaller than the dimension of the instance. if(startIndexWithinInstance < numberEntriesPerInstance) { // What's the first entry index within the batch that this thread should operate on? int startIndexWithinBatch = blockIdx.x * numberEntriesPerInstance + startIndexWithinInstance; // Is the instance greater than the current batch size? if(blockIdx.x >= batchSize) { setToZero(result, startIndexWithinBatch, numberIterations); } else { for(int indexEntry = startIndexWithinBatch; indexEntry < startIndexWithinBatch + numberIterations; indexEntry++) { int newSeed = xorShift(seeds[indexEntry]); seeds[indexEntry] = newSeed; float mask = generateMask((float)newSeed, dropoutProbability); masks[indexEntry] = mask; result[indexEntry] = mask * input[indexEntry]; } } } }
a5904cf793d96949ff28df72df4b38a8fca968e2.cu
#include "zero/Zero.cuh" __device__ int xorShift(int seed) { int updated = seed; updated ^= updated << 13; updated ^= updated >> 17; updated ^= updated << 5; return updated; } /* dropout probability is 1 - keep probability and should be less than 1. seed + 2147483648.0: [0, 2^32/2 + 2^32/2-1 = 4294967295] (seed + 2147483648.0) / 4294967295.0: [0 to 1] (seed + 2147483648.0) / 4294967295.0 - dropout probability): (0 to 1] ceilf(seed + 2147483648.0) / 4294967295.0 - dropout probability): or or 1 */ __device__ float generateMask(float seed, float dropoutProbability) { return ceilf((seed + 2147483648.0) / 4294967295.0 - dropoutProbability); } extern "C" __global__ void dropoutTrainingKernel (int batchSize, int numberEntriesPerInstance, int numberIterations, float dropoutProbability, float* input, int* seeds, float* masks, float* result) { // What's the first entry index within the instance that this thread should operate on? int startIndexWithinInstance = blockIdx.y * (blockDim.x * numberIterations) + threadIdx.x * numberIterations; // Continue if this index is smaller than the dimension of the instance. if(startIndexWithinInstance < numberEntriesPerInstance) { // What's the first entry index within the batch that this thread should operate on? int startIndexWithinBatch = blockIdx.x * numberEntriesPerInstance + startIndexWithinInstance; // Is the instance greater than the current batch size? if(blockIdx.x >= batchSize) { setToZero(result, startIndexWithinBatch, numberIterations); } else { for(int indexEntry = startIndexWithinBatch; indexEntry < startIndexWithinBatch + numberIterations; indexEntry++) { int newSeed = xorShift(seeds[indexEntry]); seeds[indexEntry] = newSeed; float mask = generateMask((float)newSeed, dropoutProbability); masks[indexEntry] = mask; result[indexEntry] = mask * input[indexEntry]; } } } }
c248aa17335d03e525c8bba991288b66317b2072.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <math.h> #include <time.h> #include <unistd.h> #include <hip/hip_runtime_api.h> #include <errno.h> #include <unistd.h> typedef struct point_t{ double x; double y; }point_t; int n_data = 1000; __device__ int d_n_data =1000; point_t data[] = { {67.05,130.55},{72.71,110.93},{73.87,116.95},{82.83,121.67}, {65.48,107.40},{71.05,153.65},{71.18,129.72},{65.97,111.80}, {78.00,128.82},{69.58,122.06},{83.71,132.56},{72.93,124.42}, {32.30,82.52},{98.06,159.12},{36.00,96.63},{57.30,97.95}, {31.09,81.46},{52.39,101.61},{ 2.44,49.27},{51.19,98.18}, {24.56,64.33},{93.68,128.37},{24.69,55.03},{75.87,117.14}, { 7.81,60.05},{58.21,103.17},{68.62,119.70},{66.21,122.28}, {54.02,106.00},{21.10,60.60},{21.29,77.59},{74.63,104.05}, {61.34,107.26},{91.52,135.16},{83.53,132.52},{14.41,50.37}, {16.45,43.68},{63.25,92.18},{98.79,157.08},{60.62,100.78}, {50.34,88.46},{ 9.42,56.21},{90.24,140.16},{ 7.81,56.32}, { 0.98,60.53},{26.20,67.08},{37.89,82.70},{15.13,48.75}, {99.28,147.44},{83.72,147.08},{54.34,101.58},{99.46,142.41}, {54.05,104.09},{95.03,129.33},{86.14,137.63},{95.72,166.69}, {47.38,104.52},{89.50,125.54},{78.82,124.78},{ 9.78,52.28}, {24.43,67.41},{95.62,155.09},{94.65,164.02},{ 5.08,43.64}, {28.19,66.34},{64.44,112.47},{97.73,128.82},{10.83,32.85}, {12.09,53.74},{48.30,103.34},{93.51,141.33},{99.95,139.83}, {11.08,40.78},{46.90,107.80},{16.35,49.45},{33.31,45.91}, {14.21,49.22},{16.01,70.02},{49.39,87.52},{82.92,124.35}, {46.91,93.72},{24.80,55.76},{26.93,64.60},{94.69,138.42}, {65.60,105.82},{35.88,72.51},{28.92,57.73},{70.72,108.05}, {30.94,70.40},{47.26,92.07},{14.10,54.78},{10.36,32.79}, {21.10,68.42},{11.04,54.35},{11.07,41.89},{ 4.72,46.93}, {32.16,62.12},{30.07,76.30},{48.67,90.99},{50.39,99.08}, {35.49,82.03},{20.02,63.53},{90.52,116.31},{29.19,73.06}, {84.53,135.72},{37.41,79.17},{21.41,56.80},{64.69,112.06}, {76.71,128.98},{86.87,146.26},{49.96,102.34},{21.07,68.04}, {43.80,88.33},{41.67,105.01},{13.26,59.46},{11.81,44.33}, {11.97,35.79},{76.44,133.07},{ 1.61,35.24},{86.13,139.48}, {71.39,111.60},{44.21,94.14},{88.87,128.78},{88.94,141.24}, {31.24,77.00},{33.48,84.28},{ 8.99,32.36},{71.13,102.96}, {19.98,74.32},{71.94,115.85},{ 3.41,44.00},{62.71,115.40}, {74.34,147.98},{56.13,75.43},{61.54,100.14},{96.71,142.71}, {62.43,96.77},{13.90,58.87},{48.02,111.64},{15.56,57.57}, {83.67,132.48},{65.43,107.98},{69.73,116.42},{16.32,55.09}, {64.96,98.72},{79.19,141.49},{59.67,100.44},{48.29,86.60}, {88.36,137.68},{69.61,116.08},{65.78,108.27},{ 9.61,30.78}, {13.31,43.45},{43.80,76.51},{33.77,80.70},{37.48,83.28}, {38.22,95.36},{44.98,93.69},{22.13,78.24},{12.37,64.49}, {48.88,89.01},{ 8.35,50.01},{27.66,71.62},{89.63,133.03}, {45.69,92.76},{89.87,127.83},{19.30,77.88},{51.58,91.46}, {36.58,91.83},{72.22,107.09},{81.12,121.04},{63.85,124.99}, {51.44,107.96},{47.83,84.23},{12.07,75.14},{70.17,113.80}, {63.83,126.01},{24.40,74.79},{ 5.03,32.89},{66.80,125.93}, {58.30,89.90},{85.81,115.71},{80.71,106.93},{59.43,90.49}, {44.93,68.12},{77.26,115.86},{32.85,76.57},{93.93,139.20}, {85.24,143.19},{78.41,136.67},{66.68,91.90},{34.44,77.03}, {15.28,73.96},{25.03,75.66},{10.03,54.65},{21.89,67.50}, {72.60,113.03},{31.53,73.26},{61.87,127.33},{54.02,105.24}, {21.17,77.23},{22.58,64.11},{99.52,148.64},{16.56,59.58}, {35.17,80.58},{82.04,123.19},{65.11,123.04},{67.12,143.80}, {67.79,125.46},{89.70,130.04},{49.63,95.16},{90.07,128.41}, {23.39,75.47},{54.01,93.40},{22.09,66.61},{73.61,112.71}, 
{14.51,50.66},{81.38,126.96},{63.32,123.85},{42.96,83.11}, {78.78,136.60},{14.76,53.81},{38.07,77.99},{ 0.35,48.65}, {78.95,123.11},{54.47,91.72},{ 6.41,34.05},{41.19,91.38}, {43.91,82.70},{ 5.01,47.48},{45.71,79.12},{99.00,155.94}, {30.23,66.36},{48.09,104.00},{13.00,58.20},{59.17,109.36}, {26.65,62.39},{95.91,163.69},{69.71,117.93},{46.87,79.54}, {68.99,114.16},{94.62,128.63},{19.52,80.58},{88.76,148.49}, {78.71,135.81},{49.74,99.20},{56.71,88.63},{96.17,154.58}, {45.24,87.04},{64.77,110.00},{90.52,126.41},{51.97,95.75}, {59.09,108.94},{98.18,149.19},{11.41,40.49},{82.36,140.69}, {19.16,80.67},{60.83,86.45},{69.99,117.82},{ 7.55,38.21}, {16.18,68.26},{52.90,110.46},{51.02,104.93},{45.96,75.07}, {75.90,135.39},{ 2.29,52.60},{80.96,111.96},{87.92,154.07}, { 8.73,46.30},{20.69,63.68},{91.30,131.74},{13.14,42.61}, {36.21,90.54},{66.03,97.69},{85.37,133.40},{89.17,155.78}, {93.16,133.01},{77.71,142.58},{ 7.80,46.80},{ 7.49,45.37}, {27.53,79.14},{23.52,66.28},{56.28,97.48},{78.78,138.10}, {41.97,94.35},{25.25,68.88},{38.66,72.78},{47.27,78.95}, {68.91,134.64},{86.33,129.52},{66.62,125.59},{ 1.43,54.24}, { 5.23,37.47},{58.80,103.12},{49.06,94.53},{92.48,151.35}, {29.30,64.42},{31.85,62.74},{90.41,142.46},{ 9.06,49.06}, {34.14,75.78},{75.47,129.08},{58.61,94.44},{40.51,74.73}, {94.80,143.67},{ 1.69,28.20},{38.52,63.51},{18.99,58.14}, {77.35,137.30},{11.34,67.05},{96.85,158.46},{83.46,127.73}, {89.57,143.21},{12.94,62.23},{92.04,145.90},{67.55,128.07}, {33.08,60.99},{81.44,143.78},{ 3.36,44.05},{26.67,80.37}, {98.74,150.46},{ 7.89,32.54},{81.40,125.83},{14.28,57.90}, {74.38,123.31},{ 7.93,62.13},{95.28,155.76},{45.18,95.69}, {68.60,102.36},{ 3.98,40.18},{85.75,129.91},{92.81,138.62}, { 5.61,56.84},{93.19,144.00},{ 6.11,33.39},{85.45,127.36}, {10.26,60.68},{64.86,116.08},{69.56,137.63},{97.12,142.39}, {77.95,122.81},{89.20,127.14},{18.84,55.05},{71.59,129.04}, {47.83,102.93},{86.52,132.59},{19.27,68.71},{92.29,134.47}, {53.51,102.91},{42.69,90.49},{33.46,75.55},{11.34,60.69}, {30.39,70.37},{50.17,98.70},{20.30,46.62},{44.71,90.81}, {28.04,78.74},{78.52,126.79},{84.79,141.38},{65.73,104.54}, { 2.52,38.61},{76.34,132.67},{48.14,92.93},{87.58,133.74}, {13.32,39.77},{60.80,115.02},{20.47,65.98},{59.77,105.75}, {29.29,70.88},{49.53,101.54},{57.93,105.43},{54.14,95.35}, {62.92,105.89},{17.85,73.00},{91.02,153.82},{47.00,101.60}, { 6.94,52.05},{55.70,109.76},{78.87,133.67},{ 2.29,37.00}, {85.12,143.69},{52.76,106.35},{48.29,88.70},{67.86,118.22}, {69.26,102.10},{53.00,101.09},{64.66,115.57},{18.07,59.16}, {51.10,79.75},{80.83,141.22},{69.71,114.52},{13.89,50.11}, {74.18,108.33},{45.74,78.80},{95.83,156.20},{47.30,85.14}, {58.86,111.71},{99.87,149.16},{27.94,68.22},{16.68,44.60}, {81.38,113.52},{49.41,101.78},{43.67,90.88},{62.22,108.27}, {10.12,46.40},{11.00,37.94},{23.73,59.91},{29.65,68.83}, {93.44,144.73},{84.89,133.74},{89.90,147.59},{57.59,107.32}, {53.75,94.19},{62.60,114.89},{20.06,63.09},{47.14,102.14}, {69.41,124.25},{28.69,52.98},{31.51,55.70},{97.51,141.15}, {39.59,83.49},{36.81,87.58},{60.03,96.34},{ 3.19,42.44}, {39.38,95.74},{65.35,100.39},{31.33,76.93},{59.03,114.95}, {62.38,121.06},{87.39,150.06},{ 9.81,34.49},{47.72,101.13}, {17.33,46.56},{29.06,77.76},{75.42,132.63},{89.20,116.35}, {45.35,89.73},{69.83,119.86},{60.73,101.05},{75.63,117.53}, {85.29,132.29},{32.82,91.72},{26.68,67.04},{47.57,75.76}, {56.49,101.25},{14.13,46.77},{81.35,137.94},{99.51,136.88}, {21.16,72.72},{30.90,65.39},{19.36,64.41},{26.09,75.63}, {63.02,118.68},{79.49,121.87},{75.38,115.23},{81.98,135.21}, 
{65.83,116.57},{ 1.35,53.04},{55.18,101.14},{ 0.57,38.69}, {14.22,48.43},{ 3.91,55.66},{35.00,73.69},{ 0.16,35.31}, {78.54,121.78},{20.92,60.16},{61.53,123.10},{ 7.42,54.83}, {21.27,50.40},{46.00,90.76},{21.02,55.44},{33.38,60.08}, {24.33,58.11},{98.65,130.06},{49.02,96.94},{16.16,65.17}, { 1.13,52.33},{16.04,56.95},{14.60,43.65},{66.78,112.94}, {78.10,116.70},{ 6.11,50.33},{68.02,119.42},{30.73,74.55}, {75.95,118.59},{89.30,137.83},{38.43,83.21},{81.23,135.79}, {28.54,59.12},{89.60,133.05},{55.49,124.61},{ 4.51,27.97}, {66.71,107.14},{66.14,120.83},{83.75,137.35},{53.88,92.82}, {59.80,110.74},{27.75,69.51},{47.43,80.69},{59.15,109.89}, {40.74,66.77},{78.83,123.40},{33.60,84.08},{93.37,136.12}, { 2.61,43.21},{52.30,90.86},{97.78,146.58},{24.87,79.35}, {18.71,71.09},{19.36,47.44},{55.49,97.45},{66.25,113.72}, {77.84,126.83},{99.98,162.75},{75.21,130.64},{86.26,142.43}, {66.30,131.35},{99.82,145.92},{48.05,111.02},{42.30,76.06}, {95.10,130.55},{43.37,72.78},{75.55,124.78},{16.94,46.10}, { 2.76,32.71},{ 6.02,41.23},{95.45,155.45},{46.97,87.53}, { 6.65,45.15},{91.43,147.03},{30.36,68.10},{45.31,96.44}, { 2.69,26.86},{55.48,85.92},{28.15,62.28},{68.04,132.62}, {14.16,44.00},{11.23,42.42},{25.45,64.48},{65.28,120.77}, {21.23,57.77},{20.66,60.89},{91.78,137.94},{84.74,117.74}, {23.57,71.11},{79.22,130.05},{72.47,112.68},{27.36,64.60}, {83.58,128.91},{92.21,134.19},{ 5.90,32.59},{62.45,116.57}, { 7.83,47.99},{72.41,108.14},{54.55,90.94},{26.31,54.87}, {33.21,103.91},{58.01,103.46},{58.75,98.45},{ 2.11,57.30}, {89.84,131.28},{96.35,144.30},{93.25,147.80},{93.77,144.03}, {55.32,97.01},{37.56,78.03},{73.66,127.43},{34.64,64.68}, {67.65,103.48},{28.91,79.13},{85.08,134.14},{40.87,87.83}, {45.56,89.38},{38.53,86.88},{95.43,143.40},{54.46,102.71}, {90.67,153.34},{ 0.34,45.27},{42.99,75.01},{53.75,97.11}, {52.17,100.02},{93.81,158.25},{80.64,128.22},{80.02,136.16}, { 5.11,52.21},{60.56,105.34},{19.91,52.11},{22.36,66.80}, {69.14,121.15},{68.21,118.35},{ 3.07,48.20},{69.90,101.48}, {28.79,68.54},{67.25,112.99},{79.70,113.71},{38.93,90.69}, {17.74,56.54},{87.87,146.73},{68.99,121.93},{54.63,111.71}, { 6.49,45.18},{47.28,100.72},{10.24,36.66},{16.36,70.59}, {11.43,58.63},{26.20,75.52},{81.27,135.37},{46.90,102.25}, {67.90,122.16},{16.23,50.34},{37.76,80.35},{17.42,74.09}, {45.76,93.58},{46.52,98.83},{38.49,94.41},{32.14,77.18}, {45.02,90.28},{ 6.22,32.26},{15.86,57.15},{97.69,152.03}, {30.84,67.45},{33.75,58.95},{28.13,61.22},{57.01,96.80}, {58.58,98.32},{94.84,127.23},{39.13,87.99},{88.72,139.94}, {82.91,130.65},{60.83,86.41},{27.91,73.72},{90.80,136.79}, {36.51,71.58},{52.68,104.45},{95.80,159.57},{28.06,78.81}, {70.64,115.12},{40.52,109.74},{84.64,118.02},{28.11,75.05}, {69.64,109.17},{83.51,121.44},{ 8.39,46.39},{93.52,133.69}, {65.52,111.82},{56.61,112.09},{99.62,152.99},{95.63,157.60}, {67.42,113.53},{43.04,72.33},{52.82,107.30},{12.23,62.64}, {69.70,119.27},{32.43,74.01},{ 1.79,37.25},{ 0.48,37.71}, {73.42,114.44},{45.16,91.85},{21.42,56.59},{28.12,61.79}, {25.83,53.87},{50.91,94.39},{ 5.91,39.67},{25.76,59.65}, {84.43,131.11},{51.93,95.08},{43.98,113.97},{11.15,59.31}, {71.09,90.60},{13.56,46.71},{12.77,53.93},{ 9.30,54.95}, {10.37,50.24},{46.91,95.76},{ 7.27,47.49},{53.38,81.00}, {87.64,135.11},{43.85,80.68},{66.52,106.86},{95.29,140.84}, { 3.29,45.33},{23.98,62.54},{97.17,136.83},{18.20,51.98}, {16.67,54.50},{11.59,42.06},{ 8.19,75.81},{46.82,94.90}, { 3.49,56.08},{34.57,87.10},{77.07,127.82},{20.87,56.09}, {65.56,102.30},{93.19,158.68},{91.93,144.65},{72.24,116.64}, 
{47.46,90.03},{47.85,71.56},{84.44,133.34},{88.94,120.58}, {75.79,129.83},{10.85,56.31},{60.67,113.08},{86.84,128.87}, {27.47,59.92},{88.30,137.85},{24.03,76.76},{52.94,90.34}, {99.16,143.50},{ 0.79,24.92},{63.71,105.16},{18.87,57.57}, {69.75,110.65},{49.34,94.79},{ 2.45,35.03},{58.69,108.02}, {85.95,132.49},{79.38,125.82},{10.43,39.76},{88.62,135.64}, {98.76,142.18},{51.40,100.96},{63.21,112.25},{96.89,147.91}, { 0.18,46.29},{50.16,98.61},{82.28,122.47},{89.05,139.97}, {41.15,83.53},{98.45,150.52},{51.61,94.90},{61.17,108.49}, {34.98,81.49},{69.67,109.90},{91.48,148.28},{97.95,153.98}, {37.49,72.85},{74.57,107.72},{59.58,109.74},{47.37,101.65}, { 1.76,36.33},{97.71,134.88},{77.50,117.88},{58.94,125.63}, {75.75,97.11},{54.85,95.51},{91.04,150.71},{56.19,102.82}, {51.68,103.60},{60.94,105.19},{88.31,137.77},{ 3.49,51.93}, {49.68,94.84},{21.85,63.30},{53.22,114.98},{11.49,53.79}, {46.23,90.95},{78.98,125.52},{78.63,100.14},{42.66,72.77}, {66.18,111.95},{19.50,69.61},{71.98,106.58},{44.85,96.46}, {38.91,87.41},{86.37,149.46},{24.92,65.95},{54.56,92.79}, {10.46,50.26},{49.47,85.40},{58.22,110.69},{36.83,83.91}, {24.79,57.36},{43.42,83.94},{63.63,125.05},{38.61,76.00}, {61.23,108.85},{55.24,84.36},{12.70,41.41},{74.63,124.95}, {74.39,109.75},{94.03,143.37},{48.58,93.00},{29.78,91.53}, { 8.39,60.41},{38.37,65.44},{85.58,122.23},{57.08,97.19}, {56.02,97.74},{33.39,64.72},{42.37,77.85},{66.76,121.17}, {45.97,81.97},{39.51,81.26},{ 0.69,35.02},{69.04,122.45}, {62.12,103.86},{88.27,125.26},{67.10,118.27},{34.84,75.78}, {65.14,111.23},{29.23,64.07},{34.22,95.25},{44.02,85.52}, {17.20,42.63},{34.13,72.30},{79.89,133.42},{21.05,69.11}, {63.40,108.07},{57.25,108.12},{56.78,101.45},{30.12,77.57}, {93.57,142.88},{58.46,101.82},{18.56,66.09},{33.98,85.65}, {12.41,48.78},{19.10,54.24},{15.47,44.38},{ 7.50,38.86}, {97.61,148.37},{ 2.35,45.54},{72.80,116.98},{67.98,125.66}, {14.65,60.57},{11.86,47.63},{38.00,86.55},{57.68,102.58}, {10.13,64.49},{11.58,48.54},{24.34,59.22},{62.82,113.29}, {22.28,76.32},{61.05,88.77},{23.70,69.67},{10.83,50.61}, {53.41,93.86},{24.18,55.97},{97.91,148.96},{60.68,97.30}, {38.44,79.37},{89.80,130.90},{85.06,146.25},{22.69,51.10}, {41.92,68.97},{57.75,102.72},{60.78,115.26},{39.98,90.28}, {51.79,80.62},{67.04,111.50},{48.77,81.14},{ 9.61,18.93}, {39.88,72.44},{30.76,65.75},{20.07,50.51},{94.25,146.82}, {23.42,63.56},{24.05,69.04},{69.40,113.47},{66.48,119.06}, { 8.69,51.61},{37.28,84.30},{26.40,85.93},{71.78,122.15}, {70.10,134.81},{73.91,113.96},{ 4.58,41.48},{24.01,71.43}, {13.08,53.65},{16.79,50.82},{43.32,75.33},{38.78,71.45}, {30.76,73.43},{83.37,118.57},{98.58,139.48},{81.93,131.44}, {86.28,131.69},{77.35,123.23},{41.85,103.46},{ 0.72,39.25}, {53.93,94.76},{70.40,97.41},{78.94,115.99},{22.38,62.97}, { 8.71,48.89},{33.18,81.64},{87.64,129.76},{53.78,117.55}, {84.33,132.09},{ 3.95,29.91},{ 1.22,37.69},{36.61,100.05}, {77.85,116.79},{55.45,101.06},{55.31,96.07},{30.68,76.43}, {41.32,105.51},{26.96,81.23},{21.63,73.14},{39.38,92.62}, {67.83,100.92},{55.78,117.17},{93.54,152.61},{72.07,138.21}, {84.05,131.95},{74.99,140.66},{64.85,115.75},{51.13,97.80}, {14.15,49.21},{19.40,56.79},{74.85,125.41},{32.54,87.62}, {42.23,76.72},{21.78,70.99},{36.67,69.35},{24.27,60.27}, {16.41,46.57},{47.29,100.17},{62.81,118.18},{58.89,118.26}, {56.36,101.22},{65.10,117.82},{29.18,61.37},{73.80,115.91}, {39.50,77.89},{64.09,103.32},{30.68,75.61},{16.71,69.93}, {32.54,79.47},{38.85,85.67},{48.92,100.61},{68.24,114.00}, {97.36,146.82},{65.35,95.83},{75.41,107.22},{ 3.45,43.37}, 
{60.03,79.70},{25.45,63.03},{45.01,71.13},{66.02,128.07}, {72.69,114.48},{53.23,101.92},{24.29,66.87},{53.42,102.96}, {54.94,111.06},{84.20,136.03},{75.05,121.43},{87.89,128.32}, {43.02,83.02},{25.31,72.16},{33.15,74.86},{62.84,109.94}, {43.90,92.88},{59.09,104.08},{62.02,111.39},{27.93,65.97}, {29.07,66.64},{89.97,130.77},{80.64,142.60},{95.07,138.94}, {18.75,53.18},{72.88,120.64},{28.28,85.71},{74.73,113.08}, {92.08,148.24},{82.89,116.08},{48.75,102.19},{30.54,69.54}, {56.89,117.70},{98.56,147.42},{47.81,78.81},{94.52,133.69}, {11.28,51.99},{46.18,83.99},{70.40,112.30},{20.01,76.37}, { 0.30,29.18},{15.12,85.43},{74.55,114.22},{89.30,140.01}, {76.85,112.56},{95.91,158.18},{26.42,59.50},{ 6.42,56.71}, {94.51,142.09},{44.92,70.88},{27.07,75.37},{32.25,71.62}, { 5.02,42.38},{33.02,77.06},{35.19,76.70},{48.89,102.74}, {70.44,118.84},{85.81,149.10},{ 3.83,53.96},{33.45,80.83}, {80.49,138.42},{43.62,109.14},{89.03,138.36},{13.47,67.59} }; double residual_error(double r, double a, double m, double c) { double e = (m * r) + c - a; return e * e; } __device__ double d_residual_error(double r, double a, double m, double c) { double e = (m * r) + c - a; return e * e; } double rms_error(double m, double c) { int i; double mean; double error_sum = 0; for(i=0; i<n_data; i++) { error_sum += residual_error(data[i].x, data[i].y, m, c); } mean = error_sum / n_data; return sqrt(mean); } __global__ void d_rms_error(double *m, double *c,double *error_sum_arr,point_t *d_data) { int i = threadIdx.x + blockIdx.x *blockDim.x; error_sum_arr[i] = d_residual_error(d_data[i].x,d_data[i].y, *m, *c); } int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0){ ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main(){ int i; double bm = 1.3; double bc = 10; double be; double dm[8]; double dc[8]; double e[8]; double step = 0.01; double best_error = 999999999; int best_error_i; int minimum_found = 0; double om[] = {0,1,1, 1, 0,-1,-1,-1}; double oc[] = {1,1,0,-1,-1,-1, 0, 1}; struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); hipError_t error; double *d_dm; double *d_dc; double *d_error_sum_arr; point_t *d_data; be= rms_error(bm,bc); error=hipMalloc(&d_dm,(sizeof(double) * 8)); if(error){ fprintf(stderr,"hipMalloc on d_dm returned %d %s\n",error, hipGetErrorString(error)); exit(1); } error=hipMalloc(&d_dc,(sizeof(double) * 8)); if(error){ fprintf(stderr,"hipMalloc on d_dc returned %d %s\n",error, hipGetErrorString(error)); exit(1); } error=hipMalloc(&d_error_sum_arr,(sizeof(double) * 1000)); if(error){ fprintf(stderr,"hipMalloc on d_error_sum_arr returned %d %s\n",error, hipGetErrorString(error)); exit(1); } error=hipMalloc(&d_data,sizeof(data)); if(error){ fprintf(stderr,"hipMalloc on d_data returned %d %s\n",error, hipGetErrorString(error)); exit(1); } while(!minimum_found) { for(i=0;i<8;i++) { dm[i] = bm + (om[i] * step); dc[i]= bc + (oc[i] * step); } error = hipMemcpy(d_dm,dm,(sizeof(double)*8), hipMemcpyHostToDevice); if(error){ fprintf(stderr,"hipMemcpy to d_dm returned %d %s\n",error, hipGetErrorString(error)); } error = hipMemcpy(d_dc,dc,(sizeof(double)*8), hipMemcpyHostToDevice); if(error){ fprintf(stderr,"hipMemcpy to d_dc returned %d %s\n",error, hipGetErrorString(error)); } error = hipMemcpy(d_data, data,sizeof(data), hipMemcpyHostToDevice); if(error){ 
fprintf(stderr,"hipMemcpy to d_data returned %d %s\n",error, hipGetErrorString(error)); } for(i=0;i<8;i++){ double h_error_sum_arr[1000]; double error_sum_total; double error_sum_mean; hipLaunchKernelGGL(( d_rms_error) , dim3(100),dim3(10), 0, 0, &d_dm[i],&d_dc[i],d_error_sum_arr,d_data); hipDeviceSynchronize(); error =hipMemcpy(&h_error_sum_arr,d_error_sum_arr,(sizeof(double) *1000), hipMemcpyDeviceToHost); if(error){ fprintf(stderr,"hipMemcpy to error_sum returned %d %s\n",error, hipGetErrorString(error)); } for(int j=0;j<n_data;j++){ error_sum_total+= h_error_sum_arr[j]; } error_sum_mean = error_sum_total / n_data; e[i] =sqrt(error_sum_mean); if(e[i] < best_error){ best_error = e[i]; error_sum_total +=h_error_sum_arr[i]; } error_sum_mean = error_sum_total /n_data; e[i] = sqrt(error_sum_mean); if(e[i]<best_error){ best_error = e[i]; best_error_i = i; } error_sum_total = 0; } if(best_error <be){ be=best_error; bm =dm[best_error_i]; bc= dc[best_error_i]; }else { minimum_found = 1; } } error = hipFree(d_dm); if(error){ fprintf(stderr,"hipFree on d_dm returned %d %s\n",error, hipGetErrorString(error)); exit(1); } error = hipFree(d_dc); if(error){ fprintf(stderr,"hipFree on d_dc returned %d %s\n",error, hipGetErrorString(error)); exit(1); } error = hipFree(d_data); if(error){ fprintf(stderr,"hipFree on d_data returned %d %s\n",error, hipGetErrorString(error)); exit(1); } error = hipFree(d_error_sum_arr); if(error){ fprintf(stderr,"hipFree on d_error_sum_arr returned %d %s\n",error, hipGetErrorString(error)); exit(1); } printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be); clock_gettime(CLOCK_MONOTONIC, &finish); time_difference(&start, &finish, &time_elapsed); printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); return 0; } ;
c248aa17335d03e525c8bba991288b66317b2072.cu
#include <stdio.h> #include <math.h> #include <time.h> #include <unistd.h> #include <cuda_runtime_api.h> #include <errno.h> #include <unistd.h> typedef struct point_t{ double x; double y; }point_t; int n_data = 1000; __device__ int d_n_data =1000; point_t data[] = { {67.05,130.55},{72.71,110.93},{73.87,116.95},{82.83,121.67}, {65.48,107.40},{71.05,153.65},{71.18,129.72},{65.97,111.80}, {78.00,128.82},{69.58,122.06},{83.71,132.56},{72.93,124.42}, {32.30,82.52},{98.06,159.12},{36.00,96.63},{57.30,97.95}, {31.09,81.46},{52.39,101.61},{ 2.44,49.27},{51.19,98.18}, {24.56,64.33},{93.68,128.37},{24.69,55.03},{75.87,117.14}, { 7.81,60.05},{58.21,103.17},{68.62,119.70},{66.21,122.28}, {54.02,106.00},{21.10,60.60},{21.29,77.59},{74.63,104.05}, {61.34,107.26},{91.52,135.16},{83.53,132.52},{14.41,50.37}, {16.45,43.68},{63.25,92.18},{98.79,157.08},{60.62,100.78}, {50.34,88.46},{ 9.42,56.21},{90.24,140.16},{ 7.81,56.32}, { 0.98,60.53},{26.20,67.08},{37.89,82.70},{15.13,48.75}, {99.28,147.44},{83.72,147.08},{54.34,101.58},{99.46,142.41}, {54.05,104.09},{95.03,129.33},{86.14,137.63},{95.72,166.69}, {47.38,104.52},{89.50,125.54},{78.82,124.78},{ 9.78,52.28}, {24.43,67.41},{95.62,155.09},{94.65,164.02},{ 5.08,43.64}, {28.19,66.34},{64.44,112.47},{97.73,128.82},{10.83,32.85}, {12.09,53.74},{48.30,103.34},{93.51,141.33},{99.95,139.83}, {11.08,40.78},{46.90,107.80},{16.35,49.45},{33.31,45.91}, {14.21,49.22},{16.01,70.02},{49.39,87.52},{82.92,124.35}, {46.91,93.72},{24.80,55.76},{26.93,64.60},{94.69,138.42}, {65.60,105.82},{35.88,72.51},{28.92,57.73},{70.72,108.05}, {30.94,70.40},{47.26,92.07},{14.10,54.78},{10.36,32.79}, {21.10,68.42},{11.04,54.35},{11.07,41.89},{ 4.72,46.93}, {32.16,62.12},{30.07,76.30},{48.67,90.99},{50.39,99.08}, {35.49,82.03},{20.02,63.53},{90.52,116.31},{29.19,73.06}, {84.53,135.72},{37.41,79.17},{21.41,56.80},{64.69,112.06}, {76.71,128.98},{86.87,146.26},{49.96,102.34},{21.07,68.04}, {43.80,88.33},{41.67,105.01},{13.26,59.46},{11.81,44.33}, {11.97,35.79},{76.44,133.07},{ 1.61,35.24},{86.13,139.48}, {71.39,111.60},{44.21,94.14},{88.87,128.78},{88.94,141.24}, {31.24,77.00},{33.48,84.28},{ 8.99,32.36},{71.13,102.96}, {19.98,74.32},{71.94,115.85},{ 3.41,44.00},{62.71,115.40}, {74.34,147.98},{56.13,75.43},{61.54,100.14},{96.71,142.71}, {62.43,96.77},{13.90,58.87},{48.02,111.64},{15.56,57.57}, {83.67,132.48},{65.43,107.98},{69.73,116.42},{16.32,55.09}, {64.96,98.72},{79.19,141.49},{59.67,100.44},{48.29,86.60}, {88.36,137.68},{69.61,116.08},{65.78,108.27},{ 9.61,30.78}, {13.31,43.45},{43.80,76.51},{33.77,80.70},{37.48,83.28}, {38.22,95.36},{44.98,93.69},{22.13,78.24},{12.37,64.49}, {48.88,89.01},{ 8.35,50.01},{27.66,71.62},{89.63,133.03}, {45.69,92.76},{89.87,127.83},{19.30,77.88},{51.58,91.46}, {36.58,91.83},{72.22,107.09},{81.12,121.04},{63.85,124.99}, {51.44,107.96},{47.83,84.23},{12.07,75.14},{70.17,113.80}, {63.83,126.01},{24.40,74.79},{ 5.03,32.89},{66.80,125.93}, {58.30,89.90},{85.81,115.71},{80.71,106.93},{59.43,90.49}, {44.93,68.12},{77.26,115.86},{32.85,76.57},{93.93,139.20}, {85.24,143.19},{78.41,136.67},{66.68,91.90},{34.44,77.03}, {15.28,73.96},{25.03,75.66},{10.03,54.65},{21.89,67.50}, {72.60,113.03},{31.53,73.26},{61.87,127.33},{54.02,105.24}, {21.17,77.23},{22.58,64.11},{99.52,148.64},{16.56,59.58}, {35.17,80.58},{82.04,123.19},{65.11,123.04},{67.12,143.80}, {67.79,125.46},{89.70,130.04},{49.63,95.16},{90.07,128.41}, {23.39,75.47},{54.01,93.40},{22.09,66.61},{73.61,112.71}, {14.51,50.66},{81.38,126.96},{63.32,123.85},{42.96,83.11}, {78.78,136.60},{14.76,53.81},{38.07,77.99},{ 
0.35,48.65}, {78.95,123.11},{54.47,91.72},{ 6.41,34.05},{41.19,91.38}, {43.91,82.70},{ 5.01,47.48},{45.71,79.12},{99.00,155.94}, {30.23,66.36},{48.09,104.00},{13.00,58.20},{59.17,109.36}, {26.65,62.39},{95.91,163.69},{69.71,117.93},{46.87,79.54}, {68.99,114.16},{94.62,128.63},{19.52,80.58},{88.76,148.49}, {78.71,135.81},{49.74,99.20},{56.71,88.63},{96.17,154.58}, {45.24,87.04},{64.77,110.00},{90.52,126.41},{51.97,95.75}, {59.09,108.94},{98.18,149.19},{11.41,40.49},{82.36,140.69}, {19.16,80.67},{60.83,86.45},{69.99,117.82},{ 7.55,38.21}, {16.18,68.26},{52.90,110.46},{51.02,104.93},{45.96,75.07}, {75.90,135.39},{ 2.29,52.60},{80.96,111.96},{87.92,154.07}, { 8.73,46.30},{20.69,63.68},{91.30,131.74},{13.14,42.61}, {36.21,90.54},{66.03,97.69},{85.37,133.40},{89.17,155.78}, {93.16,133.01},{77.71,142.58},{ 7.80,46.80},{ 7.49,45.37}, {27.53,79.14},{23.52,66.28},{56.28,97.48},{78.78,138.10}, {41.97,94.35},{25.25,68.88},{38.66,72.78},{47.27,78.95}, {68.91,134.64},{86.33,129.52},{66.62,125.59},{ 1.43,54.24}, { 5.23,37.47},{58.80,103.12},{49.06,94.53},{92.48,151.35}, {29.30,64.42},{31.85,62.74},{90.41,142.46},{ 9.06,49.06}, {34.14,75.78},{75.47,129.08},{58.61,94.44},{40.51,74.73}, {94.80,143.67},{ 1.69,28.20},{38.52,63.51},{18.99,58.14}, {77.35,137.30},{11.34,67.05},{96.85,158.46},{83.46,127.73}, {89.57,143.21},{12.94,62.23},{92.04,145.90},{67.55,128.07}, {33.08,60.99},{81.44,143.78},{ 3.36,44.05},{26.67,80.37}, {98.74,150.46},{ 7.89,32.54},{81.40,125.83},{14.28,57.90}, {74.38,123.31},{ 7.93,62.13},{95.28,155.76},{45.18,95.69}, {68.60,102.36},{ 3.98,40.18},{85.75,129.91},{92.81,138.62}, { 5.61,56.84},{93.19,144.00},{ 6.11,33.39},{85.45,127.36}, {10.26,60.68},{64.86,116.08},{69.56,137.63},{97.12,142.39}, {77.95,122.81},{89.20,127.14},{18.84,55.05},{71.59,129.04}, {47.83,102.93},{86.52,132.59},{19.27,68.71},{92.29,134.47}, {53.51,102.91},{42.69,90.49},{33.46,75.55},{11.34,60.69}, {30.39,70.37},{50.17,98.70},{20.30,46.62},{44.71,90.81}, {28.04,78.74},{78.52,126.79},{84.79,141.38},{65.73,104.54}, { 2.52,38.61},{76.34,132.67},{48.14,92.93},{87.58,133.74}, {13.32,39.77},{60.80,115.02},{20.47,65.98},{59.77,105.75}, {29.29,70.88},{49.53,101.54},{57.93,105.43},{54.14,95.35}, {62.92,105.89},{17.85,73.00},{91.02,153.82},{47.00,101.60}, { 6.94,52.05},{55.70,109.76},{78.87,133.67},{ 2.29,37.00}, {85.12,143.69},{52.76,106.35},{48.29,88.70},{67.86,118.22}, {69.26,102.10},{53.00,101.09},{64.66,115.57},{18.07,59.16}, {51.10,79.75},{80.83,141.22},{69.71,114.52},{13.89,50.11}, {74.18,108.33},{45.74,78.80},{95.83,156.20},{47.30,85.14}, {58.86,111.71},{99.87,149.16},{27.94,68.22},{16.68,44.60}, {81.38,113.52},{49.41,101.78},{43.67,90.88},{62.22,108.27}, {10.12,46.40},{11.00,37.94},{23.73,59.91},{29.65,68.83}, {93.44,144.73},{84.89,133.74},{89.90,147.59},{57.59,107.32}, {53.75,94.19},{62.60,114.89},{20.06,63.09},{47.14,102.14}, {69.41,124.25},{28.69,52.98},{31.51,55.70},{97.51,141.15}, {39.59,83.49},{36.81,87.58},{60.03,96.34},{ 3.19,42.44}, {39.38,95.74},{65.35,100.39},{31.33,76.93},{59.03,114.95}, {62.38,121.06},{87.39,150.06},{ 9.81,34.49},{47.72,101.13}, {17.33,46.56},{29.06,77.76},{75.42,132.63},{89.20,116.35}, {45.35,89.73},{69.83,119.86},{60.73,101.05},{75.63,117.53}, {85.29,132.29},{32.82,91.72},{26.68,67.04},{47.57,75.76}, {56.49,101.25},{14.13,46.77},{81.35,137.94},{99.51,136.88}, {21.16,72.72},{30.90,65.39},{19.36,64.41},{26.09,75.63}, {63.02,118.68},{79.49,121.87},{75.38,115.23},{81.98,135.21}, {65.83,116.57},{ 1.35,53.04},{55.18,101.14},{ 0.57,38.69}, {14.22,48.43},{ 3.91,55.66},{35.00,73.69},{ 0.16,35.31}, 
{78.54,121.78},{20.92,60.16},{61.53,123.10},{ 7.42,54.83}, {21.27,50.40},{46.00,90.76},{21.02,55.44},{33.38,60.08}, {24.33,58.11},{98.65,130.06},{49.02,96.94},{16.16,65.17}, { 1.13,52.33},{16.04,56.95},{14.60,43.65},{66.78,112.94}, {78.10,116.70},{ 6.11,50.33},{68.02,119.42},{30.73,74.55}, {75.95,118.59},{89.30,137.83},{38.43,83.21},{81.23,135.79}, {28.54,59.12},{89.60,133.05},{55.49,124.61},{ 4.51,27.97}, {66.71,107.14},{66.14,120.83},{83.75,137.35},{53.88,92.82}, {59.80,110.74},{27.75,69.51},{47.43,80.69},{59.15,109.89}, {40.74,66.77},{78.83,123.40},{33.60,84.08},{93.37,136.12}, { 2.61,43.21},{52.30,90.86},{97.78,146.58},{24.87,79.35}, {18.71,71.09},{19.36,47.44},{55.49,97.45},{66.25,113.72}, {77.84,126.83},{99.98,162.75},{75.21,130.64},{86.26,142.43}, {66.30,131.35},{99.82,145.92},{48.05,111.02},{42.30,76.06}, {95.10,130.55},{43.37,72.78},{75.55,124.78},{16.94,46.10}, { 2.76,32.71},{ 6.02,41.23},{95.45,155.45},{46.97,87.53}, { 6.65,45.15},{91.43,147.03},{30.36,68.10},{45.31,96.44}, { 2.69,26.86},{55.48,85.92},{28.15,62.28},{68.04,132.62}, {14.16,44.00},{11.23,42.42},{25.45,64.48},{65.28,120.77}, {21.23,57.77},{20.66,60.89},{91.78,137.94},{84.74,117.74}, {23.57,71.11},{79.22,130.05},{72.47,112.68},{27.36,64.60}, {83.58,128.91},{92.21,134.19},{ 5.90,32.59},{62.45,116.57}, { 7.83,47.99},{72.41,108.14},{54.55,90.94},{26.31,54.87}, {33.21,103.91},{58.01,103.46},{58.75,98.45},{ 2.11,57.30}, {89.84,131.28},{96.35,144.30},{93.25,147.80},{93.77,144.03}, {55.32,97.01},{37.56,78.03},{73.66,127.43},{34.64,64.68}, {67.65,103.48},{28.91,79.13},{85.08,134.14},{40.87,87.83}, {45.56,89.38},{38.53,86.88},{95.43,143.40},{54.46,102.71}, {90.67,153.34},{ 0.34,45.27},{42.99,75.01},{53.75,97.11}, {52.17,100.02},{93.81,158.25},{80.64,128.22},{80.02,136.16}, { 5.11,52.21},{60.56,105.34},{19.91,52.11},{22.36,66.80}, {69.14,121.15},{68.21,118.35},{ 3.07,48.20},{69.90,101.48}, {28.79,68.54},{67.25,112.99},{79.70,113.71},{38.93,90.69}, {17.74,56.54},{87.87,146.73},{68.99,121.93},{54.63,111.71}, { 6.49,45.18},{47.28,100.72},{10.24,36.66},{16.36,70.59}, {11.43,58.63},{26.20,75.52},{81.27,135.37},{46.90,102.25}, {67.90,122.16},{16.23,50.34},{37.76,80.35},{17.42,74.09}, {45.76,93.58},{46.52,98.83},{38.49,94.41},{32.14,77.18}, {45.02,90.28},{ 6.22,32.26},{15.86,57.15},{97.69,152.03}, {30.84,67.45},{33.75,58.95},{28.13,61.22},{57.01,96.80}, {58.58,98.32},{94.84,127.23},{39.13,87.99},{88.72,139.94}, {82.91,130.65},{60.83,86.41},{27.91,73.72},{90.80,136.79}, {36.51,71.58},{52.68,104.45},{95.80,159.57},{28.06,78.81}, {70.64,115.12},{40.52,109.74},{84.64,118.02},{28.11,75.05}, {69.64,109.17},{83.51,121.44},{ 8.39,46.39},{93.52,133.69}, {65.52,111.82},{56.61,112.09},{99.62,152.99},{95.63,157.60}, {67.42,113.53},{43.04,72.33},{52.82,107.30},{12.23,62.64}, {69.70,119.27},{32.43,74.01},{ 1.79,37.25},{ 0.48,37.71}, {73.42,114.44},{45.16,91.85},{21.42,56.59},{28.12,61.79}, {25.83,53.87},{50.91,94.39},{ 5.91,39.67},{25.76,59.65}, {84.43,131.11},{51.93,95.08},{43.98,113.97},{11.15,59.31}, {71.09,90.60},{13.56,46.71},{12.77,53.93},{ 9.30,54.95}, {10.37,50.24},{46.91,95.76},{ 7.27,47.49},{53.38,81.00}, {87.64,135.11},{43.85,80.68},{66.52,106.86},{95.29,140.84}, { 3.29,45.33},{23.98,62.54},{97.17,136.83},{18.20,51.98}, {16.67,54.50},{11.59,42.06},{ 8.19,75.81},{46.82,94.90}, { 3.49,56.08},{34.57,87.10},{77.07,127.82},{20.87,56.09}, {65.56,102.30},{93.19,158.68},{91.93,144.65},{72.24,116.64}, {47.46,90.03},{47.85,71.56},{84.44,133.34},{88.94,120.58}, {75.79,129.83},{10.85,56.31},{60.67,113.08},{86.84,128.87}, 
{27.47,59.92},{88.30,137.85},{24.03,76.76},{52.94,90.34}, {99.16,143.50},{ 0.79,24.92},{63.71,105.16},{18.87,57.57}, {69.75,110.65},{49.34,94.79},{ 2.45,35.03},{58.69,108.02}, {85.95,132.49},{79.38,125.82},{10.43,39.76},{88.62,135.64}, {98.76,142.18},{51.40,100.96},{63.21,112.25},{96.89,147.91}, { 0.18,46.29},{50.16,98.61},{82.28,122.47},{89.05,139.97}, {41.15,83.53},{98.45,150.52},{51.61,94.90},{61.17,108.49}, {34.98,81.49},{69.67,109.90},{91.48,148.28},{97.95,153.98}, {37.49,72.85},{74.57,107.72},{59.58,109.74},{47.37,101.65}, { 1.76,36.33},{97.71,134.88},{77.50,117.88},{58.94,125.63}, {75.75,97.11},{54.85,95.51},{91.04,150.71},{56.19,102.82}, {51.68,103.60},{60.94,105.19},{88.31,137.77},{ 3.49,51.93}, {49.68,94.84},{21.85,63.30},{53.22,114.98},{11.49,53.79}, {46.23,90.95},{78.98,125.52},{78.63,100.14},{42.66,72.77}, {66.18,111.95},{19.50,69.61},{71.98,106.58},{44.85,96.46}, {38.91,87.41},{86.37,149.46},{24.92,65.95},{54.56,92.79}, {10.46,50.26},{49.47,85.40},{58.22,110.69},{36.83,83.91}, {24.79,57.36},{43.42,83.94},{63.63,125.05},{38.61,76.00}, {61.23,108.85},{55.24,84.36},{12.70,41.41},{74.63,124.95}, {74.39,109.75},{94.03,143.37},{48.58,93.00},{29.78,91.53}, { 8.39,60.41},{38.37,65.44},{85.58,122.23},{57.08,97.19}, {56.02,97.74},{33.39,64.72},{42.37,77.85},{66.76,121.17}, {45.97,81.97},{39.51,81.26},{ 0.69,35.02},{69.04,122.45}, {62.12,103.86},{88.27,125.26},{67.10,118.27},{34.84,75.78}, {65.14,111.23},{29.23,64.07},{34.22,95.25},{44.02,85.52}, {17.20,42.63},{34.13,72.30},{79.89,133.42},{21.05,69.11}, {63.40,108.07},{57.25,108.12},{56.78,101.45},{30.12,77.57}, {93.57,142.88},{58.46,101.82},{18.56,66.09},{33.98,85.65}, {12.41,48.78},{19.10,54.24},{15.47,44.38},{ 7.50,38.86}, {97.61,148.37},{ 2.35,45.54},{72.80,116.98},{67.98,125.66}, {14.65,60.57},{11.86,47.63},{38.00,86.55},{57.68,102.58}, {10.13,64.49},{11.58,48.54},{24.34,59.22},{62.82,113.29}, {22.28,76.32},{61.05,88.77},{23.70,69.67},{10.83,50.61}, {53.41,93.86},{24.18,55.97},{97.91,148.96},{60.68,97.30}, {38.44,79.37},{89.80,130.90},{85.06,146.25},{22.69,51.10}, {41.92,68.97},{57.75,102.72},{60.78,115.26},{39.98,90.28}, {51.79,80.62},{67.04,111.50},{48.77,81.14},{ 9.61,18.93}, {39.88,72.44},{30.76,65.75},{20.07,50.51},{94.25,146.82}, {23.42,63.56},{24.05,69.04},{69.40,113.47},{66.48,119.06}, { 8.69,51.61},{37.28,84.30},{26.40,85.93},{71.78,122.15}, {70.10,134.81},{73.91,113.96},{ 4.58,41.48},{24.01,71.43}, {13.08,53.65},{16.79,50.82},{43.32,75.33},{38.78,71.45}, {30.76,73.43},{83.37,118.57},{98.58,139.48},{81.93,131.44}, {86.28,131.69},{77.35,123.23},{41.85,103.46},{ 0.72,39.25}, {53.93,94.76},{70.40,97.41},{78.94,115.99},{22.38,62.97}, { 8.71,48.89},{33.18,81.64},{87.64,129.76},{53.78,117.55}, {84.33,132.09},{ 3.95,29.91},{ 1.22,37.69},{36.61,100.05}, {77.85,116.79},{55.45,101.06},{55.31,96.07},{30.68,76.43}, {41.32,105.51},{26.96,81.23},{21.63,73.14},{39.38,92.62}, {67.83,100.92},{55.78,117.17},{93.54,152.61},{72.07,138.21}, {84.05,131.95},{74.99,140.66},{64.85,115.75},{51.13,97.80}, {14.15,49.21},{19.40,56.79},{74.85,125.41},{32.54,87.62}, {42.23,76.72},{21.78,70.99},{36.67,69.35},{24.27,60.27}, {16.41,46.57},{47.29,100.17},{62.81,118.18},{58.89,118.26}, {56.36,101.22},{65.10,117.82},{29.18,61.37},{73.80,115.91}, {39.50,77.89},{64.09,103.32},{30.68,75.61},{16.71,69.93}, {32.54,79.47},{38.85,85.67},{48.92,100.61},{68.24,114.00}, {97.36,146.82},{65.35,95.83},{75.41,107.22},{ 3.45,43.37}, {60.03,79.70},{25.45,63.03},{45.01,71.13},{66.02,128.07}, {72.69,114.48},{53.23,101.92},{24.29,66.87},{53.42,102.96}, 
{54.94,111.06},{84.20,136.03},{75.05,121.43},{87.89,128.32}, {43.02,83.02},{25.31,72.16},{33.15,74.86},{62.84,109.94}, {43.90,92.88},{59.09,104.08},{62.02,111.39},{27.93,65.97}, {29.07,66.64},{89.97,130.77},{80.64,142.60},{95.07,138.94}, {18.75,53.18},{72.88,120.64},{28.28,85.71},{74.73,113.08}, {92.08,148.24},{82.89,116.08},{48.75,102.19},{30.54,69.54}, {56.89,117.70},{98.56,147.42},{47.81,78.81},{94.52,133.69}, {11.28,51.99},{46.18,83.99},{70.40,112.30},{20.01,76.37}, { 0.30,29.18},{15.12,85.43},{74.55,114.22},{89.30,140.01}, {76.85,112.56},{95.91,158.18},{26.42,59.50},{ 6.42,56.71}, {94.51,142.09},{44.92,70.88},{27.07,75.37},{32.25,71.62}, { 5.02,42.38},{33.02,77.06},{35.19,76.70},{48.89,102.74}, {70.44,118.84},{85.81,149.10},{ 3.83,53.96},{33.45,80.83}, {80.49,138.42},{43.62,109.14},{89.03,138.36},{13.47,67.59} }; double residual_error(double r, double a, double m, double c) { double e = (m * r) + c - a; return e * e; } __device__ double d_residual_error(double r, double a, double m, double c) { double e = (m * r) + c - a; return e * e; } double rms_error(double m, double c) { int i; double mean; double error_sum = 0; for(i=0; i<n_data; i++) { error_sum += residual_error(data[i].x, data[i].y, m, c); } mean = error_sum / n_data; return sqrt(mean); } __global__ void d_rms_error(double *m, double *c,double *error_sum_arr,point_t *d_data) { int i = threadIdx.x + blockIdx.x *blockDim.x; error_sum_arr[i] = d_residual_error(d_data[i].x,d_data[i].y, *m, *c); } int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0){ ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main(){ int i; double bm = 1.3; double bc = 10; double be; double dm[8]; double dc[8]; double e[8]; double step = 0.01; double best_error = 999999999; int best_error_i; int minimum_found = 0; double om[] = {0,1,1, 1, 0,-1,-1,-1}; double oc[] = {1,1,0,-1,-1,-1, 0, 1}; struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); cudaError_t error; double *d_dm; double *d_dc; double *d_error_sum_arr; point_t *d_data; be= rms_error(bm,bc); error=cudaMalloc(&d_dm,(sizeof(double) * 8)); if(error){ fprintf(stderr,"cudaMalloc on d_dm returned %d %s\n",error, cudaGetErrorString(error)); exit(1); } error=cudaMalloc(&d_dc,(sizeof(double) * 8)); if(error){ fprintf(stderr,"cudaMalloc on d_dc returned %d %s\n",error, cudaGetErrorString(error)); exit(1); } error=cudaMalloc(&d_error_sum_arr,(sizeof(double) * 1000)); if(error){ fprintf(stderr,"cudaMalloc on d_error_sum_arr returned %d %s\n",error, cudaGetErrorString(error)); exit(1); } error=cudaMalloc(&d_data,sizeof(data)); if(error){ fprintf(stderr,"cudaMalloc on d_data returned %d %s\n",error, cudaGetErrorString(error)); exit(1); } while(!minimum_found) { for(i=0;i<8;i++) { dm[i] = bm + (om[i] * step); dc[i]= bc + (oc[i] * step); } error = cudaMemcpy(d_dm,dm,(sizeof(double)*8), cudaMemcpyHostToDevice); if(error){ fprintf(stderr,"cudaMemcpy to d_dm returned %d %s\n",error, cudaGetErrorString(error)); } error = cudaMemcpy(d_dc,dc,(sizeof(double)*8), cudaMemcpyHostToDevice); if(error){ fprintf(stderr,"cudaMemcpy to d_dc returned %d %s\n",error, cudaGetErrorString(error)); } error = cudaMemcpy(d_data, data,sizeof(data), cudaMemcpyHostToDevice); if(error){ fprintf(stderr,"cudaMemcpy to d_data returned %d %s\n",error, cudaGetErrorString(error)); } for(i=0;i<8;i++){ 
double h_error_sum_arr[1000]; double error_sum_total; double error_sum_mean; d_rms_error <<<100,10>>>(&d_dm[i],&d_dc[i],d_error_sum_arr,d_data); cudaThreadSynchronize(); error =cudaMemcpy(&h_error_sum_arr,d_error_sum_arr,(sizeof(double) *1000), cudaMemcpyDeviceToHost); if(error){ fprintf(stderr,"cudaMemcpy to error_sum returned %d %s\n",error, cudaGetErrorString(error)); } for(int j=0;j<n_data;j++){ error_sum_total+= h_error_sum_arr[j]; } error_sum_mean = error_sum_total / n_data; e[i] =sqrt(error_sum_mean); if(e[i] < best_error){ best_error = e[i]; error_sum_total +=h_error_sum_arr[i]; } error_sum_mean = error_sum_total /n_data; e[i] = sqrt(error_sum_mean); if(e[i]<best_error){ best_error = e[i]; best_error_i = i; } error_sum_total = 0; } if(best_error <be){ be=best_error; bm =dm[best_error_i]; bc= dc[best_error_i]; }else { minimum_found = 1; } } error = cudaFree(d_dm); if(error){ fprintf(stderr,"cudaFree on d_dm returned %d %s\n",error, cudaGetErrorString(error)); exit(1); } error = cudaFree(d_dc); if(error){ fprintf(stderr,"cudaFree on d_dc returned %d %s\n",error, cudaGetErrorString(error)); exit(1); } error = cudaFree(d_data); if(error){ fprintf(stderr,"cudaFree on d_data returned %d %s\n",error, cudaGetErrorString(error)); exit(1); } error = cudaFree(d_error_sum_arr); if(error){ fprintf(stderr,"cudaFree on d_error_sum_arr returned %d %s\n",error, cudaGetErrorString(error)); exit(1); } printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be); clock_gettime(CLOCK_MONOTONIC, &finish); time_difference(&start, &finish, &time_elapsed); printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); return 0; } ;
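In both the HIP and CUDA variants above, the per-offset RMS loop reads error_sum_total before initializing it and runs the best-error comparison twice, the first time without recording best_error_i. A minimal corrected sketch of that inner loop, keeping the file's variable names; CUDA spellings are used for brevity (the HIP form differs only in the API names), and the memcpy error check is elided:

// Corrected shape of the inner per-offset loop (sketch only).
for (i = 0; i < 8; i++) {
    double h_error_sum_arr[1000];
    double error_sum_total = 0.0;          /* must start at zero each iteration */
    double error_sum_mean;

    d_rms_error<<<100, 10>>>(&d_dm[i], &d_dc[i], d_error_sum_arr, d_data);
    cudaDeviceSynchronize();
    cudaMemcpy(h_error_sum_arr, d_error_sum_arr, sizeof(double) * 1000,
               cudaMemcpyDeviceToHost);

    for (int j = 0; j < n_data; j++)
        error_sum_total += h_error_sum_arr[j];

    error_sum_mean = error_sum_total / n_data;
    e[i] = sqrt(error_sum_mean);

    if (e[i] < best_error) {               /* single comparison, index recorded */
        best_error = e[i];
        best_error_i = i;
    }
}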
eb538ff931498b637789ed60655574641440203a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include "CudaErrorChecks.h" void __cudaSafeCall( hipError_t err, const char *file, const int line ) { #ifdef CUDA_ERROR_CHECK if ( hipSuccess != err ) { fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, hipGetErrorString( err ) ); exit( -1 ); } #endif return; } void __cudaCheckError( const char *file, const int line ) { #ifdef CUDA_ERROR_CHECK hipError_t err = hipGetLastError(); if ( hipSuccess != err ) { fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n", file, line, hipGetErrorString( err ) ); exit( -1 ); } // More careful checking. However, this will affect performance. // Comment away if needed. err = hipDeviceSynchronize(); if( hipSuccess != err ) { fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n", file, line, hipGetErrorString( err ) ); exit( -1 ); } #endif return; }
eb538ff931498b637789ed60655574641440203a.cu
#include <stdio.h> #include "CudaErrorChecks.h" void __cudaSafeCall( cudaError err, const char *file, const int line ) { #ifdef CUDA_ERROR_CHECK if ( cudaSuccess != err ) { fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, cudaGetErrorString( err ) ); exit( -1 ); } #endif return; } void __cudaCheckError( const char *file, const int line ) { #ifdef CUDA_ERROR_CHECK cudaError err = cudaGetLastError(); if ( cudaSuccess != err ) { fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n", file, line, cudaGetErrorString( err ) ); exit( -1 ); } // More careful checking. However, this will affect performance. // Comment away if needed. err = cudaDeviceSynchronize(); if( cudaSuccess != err ) { fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n", file, line, cudaGetErrorString( err ) ); exit( -1 ); } #endif return; }
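The two functions above are normally reached through wrapper macros that capture the call site; the header they belong to (CudaErrorChecks.h here) typically defines something like the following, though the exact macro names in that header are an assumption:

// Assumed shape of CudaErrorChecks.h -- the real header may differ.
#define CUDA_ERROR_CHECK
#define CudaSafeCall(err)  __cudaSafeCall((err), __FILE__, __LINE__)
#define CudaCheckError()   __cudaCheckError(__FILE__, __LINE__)

// Usage:
//   CudaSafeCall(cudaMalloc(&d_buf, bytes));   // checks the API return code
//   my_kernel<<<grid, block>>>(d_buf);
//   CudaCheckError();                          // checks the launch (and, here, the sync)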
2ef01038d2a53eda1aca7d5a245a90a3e5f174c6.hip
// !!! This is a file automatically generated by hipify!!! /** * \file dnn/src/cuda/remap/backward_mat.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2020 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. */ #include <hip/hip_runtime.h> #include "src/common/rounding_converter.cuh" #include "src/cuda/cv/kernel_common.cuh" #include "src/cuda/remap/common.h" #include "src/cuda/utils.cuh" using namespace megdnn; using namespace cuda; using namespace remap; using namespace rounding; namespace { template <const uint32_t format> __device__ inline int get_offset(int height, int width, int channel, int h, int w, int c); template <> __device__ inline int get_offset<param_enumv::Remap::Format::NCHW>( int height, int width, int channel, int h, int w, int c) { return channel * h * w + height * w + width; } template <typename ctype, const uint32_t format, ::BorderMode bmode> struct GetSrcData { __device__ static inline int get_index(int height, int width, int channel, int h, int w, int c) { height = megcv::border_interpolate<bmode>(height, h); width = megcv::border_interpolate<bmode>(width, w); return get_offset<format>(height, width, channel, h, w, c); } }; template <typename ctype, const uint32_t format> struct GetSrcData<ctype, format, ::BorderMode::BORDER_CONSTANT> { __device__ static inline int get_index(int height, int width, int channel, int h, int w, int c) { return (height >= 0 && height < h && width >= 0 && width < w) ? get_offset<format>(height, width, channel, h, w, c) : -1; } }; template <typename ctype, const uint32_t format, ::BorderMode bmode> __global__ void kern_general(const ctype* src, const float* map_xy, const ctype* diff, float* __restrict grad, int C, int IH, int IW, int OH, int OW, float scalar) { int ow = blockIdx.x * blockDim.x + threadIdx.x; int oh = blockIdx.y * blockDim.y + threadIdx.y; src += blockIdx.z * C * IH * IW; diff += blockIdx.z * C * OH * OW; map_xy += blockIdx.z * 2 * OH * OW; grad += blockIdx.z * 2 * OH * OW; RoundingConverter<ctype> round_converter; if (ow < OW && oh < OH) { float index_col = map_xy[oh * OW * 2 + ow * 2 + 0]; float index_row = map_xy[oh * OW * 2 + ow * 2 + 1]; int col = static_cast<int>(floor(index_col)); int row = static_cast<int>(floor(index_row)); float v = index_col - col; // alphaw float u = index_row - row; // alphah const float one = 1.f; for (int c = 0; c < C; ++c) { float hidden = static_cast<float>( diff[get_offset<format>( oh, ow, c, OH, OW, C)]); float du = 0.f, dv = 0.f; int a00 = GetSrcData<ctype, format, bmode>::get_index( row + 0, col + 0, c, IH, IW, C); int a01 = GetSrcData<ctype, format, bmode>::get_index( row + 0, col + 1, c, IH, IW, C); int a10 = GetSrcData<ctype, format, bmode>::get_index( row + 1, col + 0, c, IH, IW, C); int a11 = GetSrcData<ctype, format, bmode>::get_index( row + 1, col + 1, c, IH, IW, C); dv -= ((a00 != -1) ? src[a00] : scalar) * (one - u); dv += ((a01 != -1) ? src[a01] : scalar) * (one - u); dv -= ((a10 != -1) ? src[a10] : scalar) * u; dv += ((a11 != -1) ? src[a11] : scalar) * u; du -= ((a00 != -1) ? src[a00] : scalar) * (one - v); du -= ((a01 != -1) ? src[a01] : scalar) * v; du += ((a10 != -1) ? src[a10] : scalar) * (one - v); du += ((a11 != -1) ? 
src[a11] : scalar) * v; grad[oh * OW * 2 + ow * 2 + 0] += round_converter(hidden * dv); grad[oh * OW * 2 + ow * 2 + 1] += round_converter(hidden * du); } } } template <typename ctype, const uint32_t format, ::BorderMode bmode> void dispatch_backwardmat(const ctype* src, const float* map_xy, const ctype* diff, float* grad, int N, int C, int IH, int IW, int OH, int OW, float scalar, hipStream_t stream) { const int BX = 32, BY = 16; const int max_batch_size = 65535; while (N) { size_t curr_batch_size = N < max_batch_size ? N : max_batch_size; dim3 threads(BX, BY); dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size); cuda_check(hipMemsetAsync( grad, 0, sizeof(float) * curr_batch_size * OH * OW * 2, stream)); hipLaunchKernelGGL(( kern_general<ctype, format, bmode>), dim3(blocks), dim3(threads), 0, stream, src, map_xy, diff, grad, C, IH, IW, OH, OW, scalar); N -= curr_batch_size; src += curr_batch_size * C * IH * IW; diff += curr_batch_size * C * OH * OW; map_xy += curr_batch_size * 2 * OH * OW; grad += curr_batch_size * 2 * OH * OW; } } } // anonymous namespace namespace megdnn { namespace cuda { namespace remap { template <typename ctype, const uint32_t format, ::BorderMode bmode> void backwardmat_proxy(const ctype* src, const float* map_xy, const ctype* diff, float* grad, int N, int C, int IH, int IW, int OH, int OW, float scalar, hipStream_t stream) { dispatch_backwardmat<ctype, format, bmode>(src, map_xy, diff, grad, N, C, IH, IW, OH, OW, scalar, stream); after_kernel_launch(); } #define INST(ctype, format, bmode) \ template void backwardmat_proxy<ctype, param_enumv::Remap::Format::format, \ ::BorderMode::bmode>( \ const ctype*, const float*, const ctype*, float*, int, int, int, \ int, int, int, float, hipStream_t); #define FOR_FORMAT_BMODE(ctype) \ INST(ctype, NCHW, BORDER_CONSTANT) \ INST(ctype, NCHW, BORDER_REPLICATE) \ INST(ctype, NCHW, BORDER_REFLECT) \ INST(ctype, NCHW, BORDER_REFLECT_101) \ INST(ctype, NCHW, BORDER_WRAP) FOR_FORMAT_BMODE(float) MEGDNN_INC_FLOAT16(FOR_FORMAT_BMODE(dt_bfloat16)) #undef FOR_FORMAT_BMODE #undef INST } // namespace remap } // namespace cuda } // namespace megdnn // vim: syntax=cpp.doxygen
2ef01038d2a53eda1aca7d5a245a90a3e5f174c6.cu
/** * \file dnn/src/cuda/remap/backward_mat.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2020 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. */ #include <cuda_runtime.h> #include "src/common/rounding_converter.cuh" #include "src/cuda/cv/kernel_common.cuh" #include "src/cuda/remap/common.h" #include "src/cuda/utils.cuh" using namespace megdnn; using namespace cuda; using namespace remap; using namespace rounding; namespace { template <const uint32_t format> __device__ inline int get_offset(int height, int width, int channel, int h, int w, int c); template <> __device__ inline int get_offset<param_enumv::Remap::Format::NCHW>( int height, int width, int channel, int h, int w, int c) { return channel * h * w + height * w + width; } template <typename ctype, const uint32_t format, ::BorderMode bmode> struct GetSrcData { __device__ static inline int get_index(int height, int width, int channel, int h, int w, int c) { height = megcv::border_interpolate<bmode>(height, h); width = megcv::border_interpolate<bmode>(width, w); return get_offset<format>(height, width, channel, h, w, c); } }; template <typename ctype, const uint32_t format> struct GetSrcData<ctype, format, ::BorderMode::BORDER_CONSTANT> { __device__ static inline int get_index(int height, int width, int channel, int h, int w, int c) { return (height >= 0 && height < h && width >= 0 && width < w) ? get_offset<format>(height, width, channel, h, w, c) : -1; } }; template <typename ctype, const uint32_t format, ::BorderMode bmode> __global__ void kern_general(const ctype* src, const float* map_xy, const ctype* diff, float* __restrict grad, int C, int IH, int IW, int OH, int OW, float scalar) { int ow = blockIdx.x * blockDim.x + threadIdx.x; int oh = blockIdx.y * blockDim.y + threadIdx.y; src += blockIdx.z * C * IH * IW; diff += blockIdx.z * C * OH * OW; map_xy += blockIdx.z * 2 * OH * OW; grad += blockIdx.z * 2 * OH * OW; RoundingConverter<ctype> round_converter; if (ow < OW && oh < OH) { float index_col = map_xy[oh * OW * 2 + ow * 2 + 0]; float index_row = map_xy[oh * OW * 2 + ow * 2 + 1]; int col = static_cast<int>(floor(index_col)); int row = static_cast<int>(floor(index_row)); float v = index_col - col; // alphaw float u = index_row - row; // alphah const float one = 1.f; for (int c = 0; c < C; ++c) { float hidden = static_cast<float>( diff[get_offset<format>( oh, ow, c, OH, OW, C)]); float du = 0.f, dv = 0.f; int a00 = GetSrcData<ctype, format, bmode>::get_index( row + 0, col + 0, c, IH, IW, C); int a01 = GetSrcData<ctype, format, bmode>::get_index( row + 0, col + 1, c, IH, IW, C); int a10 = GetSrcData<ctype, format, bmode>::get_index( row + 1, col + 0, c, IH, IW, C); int a11 = GetSrcData<ctype, format, bmode>::get_index( row + 1, col + 1, c, IH, IW, C); dv -= ((a00 != -1) ? src[a00] : scalar) * (one - u); dv += ((a01 != -1) ? src[a01] : scalar) * (one - u); dv -= ((a10 != -1) ? src[a10] : scalar) * u; dv += ((a11 != -1) ? src[a11] : scalar) * u; du -= ((a00 != -1) ? src[a00] : scalar) * (one - v); du -= ((a01 != -1) ? src[a01] : scalar) * v; du += ((a10 != -1) ? src[a10] : scalar) * (one - v); du += ((a11 != -1) ? 
src[a11] : scalar) * v; grad[oh * OW * 2 + ow * 2 + 0] += round_converter(hidden * dv); grad[oh * OW * 2 + ow * 2 + 1] += round_converter(hidden * du); } } } template <typename ctype, const uint32_t format, ::BorderMode bmode> void dispatch_backwardmat(const ctype* src, const float* map_xy, const ctype* diff, float* grad, int N, int C, int IH, int IW, int OH, int OW, float scalar, cudaStream_t stream) { const int BX = 32, BY = 16; const int max_batch_size = 65535; while (N) { size_t curr_batch_size = N < max_batch_size ? N : max_batch_size; dim3 threads(BX, BY); dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size); cuda_check(cudaMemsetAsync( grad, 0, sizeof(float) * curr_batch_size * OH * OW * 2, stream)); kern_general<ctype, format, bmode><<<blocks, threads, 0, stream>>>( src, map_xy, diff, grad, C, IH, IW, OH, OW, scalar); N -= curr_batch_size; src += curr_batch_size * C * IH * IW; diff += curr_batch_size * C * OH * OW; map_xy += curr_batch_size * 2 * OH * OW; grad += curr_batch_size * 2 * OH * OW; } } } // anonymous namespace namespace megdnn { namespace cuda { namespace remap { template <typename ctype, const uint32_t format, ::BorderMode bmode> void backwardmat_proxy(const ctype* src, const float* map_xy, const ctype* diff, float* grad, int N, int C, int IH, int IW, int OH, int OW, float scalar, cudaStream_t stream) { dispatch_backwardmat<ctype, format, bmode>(src, map_xy, diff, grad, N, C, IH, IW, OH, OW, scalar, stream); after_kernel_launch(); } #define INST(ctype, format, bmode) \ template void backwardmat_proxy<ctype, param_enumv::Remap::Format::format, \ ::BorderMode::bmode>( \ const ctype*, const float*, const ctype*, float*, int, int, int, \ int, int, int, float, cudaStream_t); #define FOR_FORMAT_BMODE(ctype) \ INST(ctype, NCHW, BORDER_CONSTANT) \ INST(ctype, NCHW, BORDER_REPLICATE) \ INST(ctype, NCHW, BORDER_REFLECT) \ INST(ctype, NCHW, BORDER_REFLECT_101) \ INST(ctype, NCHW, BORDER_WRAP) FOR_FORMAT_BMODE(float) MEGDNN_INC_FLOAT16(FOR_FORMAT_BMODE(dt_bfloat16)) #undef FOR_FORMAT_BMODE #undef INST } // namespace remap } // namespace cuda } // namespace megdnn // vim: syntax=cpp.doxygen
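The dv/du accumulations in kern_general above are the partial derivatives of a bilinear sample with respect to the fractional coordinates v (column) and u (row). A small host-side restatement of that arithmetic for one tap; s00..s11 stand for the four neighbouring source values (with the scalar substituted for out-of-range taps), and the helper itself is illustrative:

// Sampled value: S(u, v) = (1-u)(1-v)*s00 + (1-u)v*s01 + u(1-v)*s10 + u*v*s11
// The kernel scales these derivatives by the incoming gradient ("hidden") and
// accumulates them into the two map_xy gradient channels.
static inline void bilinear_grad(float s00, float s01, float s10, float s11,
                                 float u, float v, float *dS_dv, float *dS_du)
{
    *dS_dv = (1.0f - u) * (s01 - s00) + u * (s11 - s10);  /* matches the dv terms */
    *dS_du = (1.0f - v) * (s10 - s00) + v * (s11 - s01);  /* matches the du terms */
}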
0f5b4cb1fbfd930cdcaf9f3c57f9b50fd39d8bc5.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Two vector addition using CUDA
 * based on prac1b.cu
 * Modified by: Aryya Dwisatya W - 13512043
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#include "cutil_inline.h"

//
// kernel routine
//

/* Initiating first vector with value of thread id */
__global__ void my_first_kernel(float *x)
{
  int tid = threadIdx.x + blockDim.x*blockIdx.x;

  x[tid] = (float) threadIdx.x;
}

/* Initiating second vector with value of thread id */
__global__ void my_second_kernel(float *x)
{
  int tid = threadIdx.x + blockDim.x*blockIdx.x;

  x[tid] = (float) threadIdx.x; /* a constant could also be used here if desired */
}

/* Adding the value of second vector to the first vector */
__global__ void add_vector(float *x, float *y)
{
  int tid = threadIdx.x + blockDim.x*blockIdx.x;
  x[tid] += y[tid];
}

//
// main code
//

int main(int argc, char **argv)
{
  float *h_x, *h_x2, *d_x, *d_x2;
  int   nblocks, nthreads, nsize, n;

  // initialise card
  cutilDeviceInit(argc, argv);

  // set number of blocks, and threads per block
  nblocks  = 2;
  nthreads = 8;
  nsize    = nblocks*nthreads;

  // allocate memory for arrays
  h_x  = (float *)malloc(nsize*sizeof(float));
  cudaSafeCall(hipMalloc((void **)&d_x, nsize*sizeof(float)));
  h_x2 = (float *)malloc(nsize*sizeof(float));
  cudaSafeCall(hipMalloc((void **)&d_x2, nsize*sizeof(float)));

  // execute kernel
  /* initiating the value of the first vector */
  hipLaunchKernelGGL(( my_first_kernel), dim3(nblocks),dim3(nthreads), 0, 0, d_x);
  cudaCheckMsg("my_first_kernel execution failed\n");

  /* initiating the value of the second vector */
  hipLaunchKernelGGL(( my_second_kernel), dim3(nblocks),dim3(nthreads), 0, 0, d_x2);
  cudaCheckMsg("my_second_kernel execution failed\n");

  /* Add the second vector to the first */
  hipLaunchKernelGGL(( add_vector), dim3(nblocks),dim3(nthreads), 0, 0, d_x2, d_x);

  // copy back results and print them out
  /* copy the result to the host vector */
  cudaSafeCall( hipMemcpy(h_x,d_x2,nsize*sizeof(float), hipMemcpyDeviceToHost) );

  /* print the result */
  for (n=0; n<nsize; n++) printf(" n, x = %d %f \n",n,h_x[n]);

  // free memory
  cudaSafeCall(hipFree(d_x));
  cudaSafeCall(hipFree(d_x2));
  free(h_x);
  free(h_x2);

  // CUDA exit -- needed to flush printf write buffer
  hipDeviceReset();

  return 0;
}
0f5b4cb1fbfd930cdcaf9f3c57f9b50fd39d8bc5.cu
/*
 * Two vector addition using CUDA
 * based on prac1b.cu
 * Modified by: Aryya Dwisatya W - 13512043
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#include "cutil_inline.h"

//
// kernel routine
//

/* Initiating first vector with value of thread id */
__global__ void my_first_kernel(float *x)
{
  int tid = threadIdx.x + blockDim.x*blockIdx.x;

  x[tid] = (float) threadIdx.x;
}

/* Initiating second vector with value of thread id */
__global__ void my_second_kernel(float *x)
{
  int tid = threadIdx.x + blockDim.x*blockIdx.x;

  x[tid] = (float) threadIdx.x; /* a constant could also be used here if desired */
}

/* Adding the value of second vector to the first vector */
__global__ void add_vector(float *x, float *y)
{
  int tid = threadIdx.x + blockDim.x*blockIdx.x;
  x[tid] += y[tid];
}

//
// main code
//

int main(int argc, char **argv)
{
  float *h_x, *h_x2, *d_x, *d_x2;
  int   nblocks, nthreads, nsize, n;

  // initialise card
  cutilDeviceInit(argc, argv);

  // set number of blocks, and threads per block
  nblocks  = 2;
  nthreads = 8;
  nsize    = nblocks*nthreads;

  // allocate memory for arrays
  h_x  = (float *)malloc(nsize*sizeof(float));
  cudaSafeCall(cudaMalloc((void **)&d_x, nsize*sizeof(float)));
  h_x2 = (float *)malloc(nsize*sizeof(float));
  cudaSafeCall(cudaMalloc((void **)&d_x2, nsize*sizeof(float)));

  // execute kernel
  /* initiating the value of the first vector */
  my_first_kernel<<<nblocks,nthreads>>>(d_x);
  cudaCheckMsg("my_first_kernel execution failed\n");

  /* initiating the value of the second vector */
  my_second_kernel<<<nblocks,nthreads>>>(d_x2);
  cudaCheckMsg("my_second_kernel execution failed\n");

  /* Add the second vector to the first */
  add_vector<<<nblocks,nthreads>>>(d_x2, d_x);

  // copy back results and print them out
  /* copy the result to the host vector */
  cudaSafeCall( cudaMemcpy(h_x,d_x2,nsize*sizeof(float), cudaMemcpyDeviceToHost) );

  /* print the result */
  for (n=0; n<nsize; n++) printf(" n, x = %d %f \n",n,h_x[n]);

  // free memory
  cudaSafeCall(cudaFree(d_x));
  cudaSafeCall(cudaFree(d_x2));
  free(h_x);
  free(h_x2);

  // CUDA exit -- needed to flush printf write buffer
  cudaDeviceReset();

  return 0;
}
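The pair above shows the core mechanical rewrite hipify performs: CUDA headers map to hip/hip_runtime.h, cuda* runtime calls map to their hip* equivalents, and the triple-chevron launch becomes hipLaunchKernelGGL with explicit dim3 grid/block, shared-memory and stream arguments. A self-contained sketch of the same mapping on a trivial kernel (a hypothetical saxpy example, with no cutil_inline.h helpers assumed):

#include <cstdio>
#include <cuda_runtime.h>

__global__ void saxpy(int n, float a, const float* x, float* y) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) y[i] = a * x[i] + y[i];
}

int main() {
    const int n = 256;
    float *x, *y;
    cudaMallocManaged(&x, n * sizeof(float));   // HIP: hipMallocManaged
    cudaMallocManaged(&y, n * sizeof(float));
    for (int i = 0; i < n; ++i) { x[i] = 1.f; y[i] = 2.f; }

    // CUDA launch; hipify rewrites this to:
    //   hipLaunchKernelGGL(saxpy, dim3((n + 127) / 128), dim3(128), 0, 0, n, 2.f, x, y);
    saxpy<<<(n + 127) / 128, 128>>>(n, 2.f, x, y);
    cudaDeviceSynchronize();                    // HIP: hipDeviceSynchronize

    printf("y[0] = %f\n", y[0]);                // expect 4.0
    cudaFree(x);                                // HIP: hipFree
    cudaFree(y);
    return 0;
}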
d7262cc0414b9d8e0d22794a84776d349232cbd4.hip
// !!! This is a file automatically generated by hipify!!! #include <cmath> #include <iostream> #include <fstream> #include <ctime> #include <omp.h> #include "workspace.hxx" #include "agent.cuh" #include "vector.cuh" #include "tester.hxx" //#include "octree.hxx" #include <hip/hip_runtime.h> #define BUFF_SIZE 20 Workspace::Workspace(ArgumentParser &parser) { na = parser("agents").asInt(); wCohesion = parser("wc").asFloat(); wAlignment = parser("wa").asFloat(); wSeparation = parser("ws").asFloat(); rCohesion = parser("rc").asFloat(); rAlignment = parser("ra").asFloat(); rSeparation = parser("rs").asFloat(); dt= 0.05; maxU = 2.0; time = 0.,//; this->init();} Workspace::Workspace(size_t nAgents, Real wc, Real wa, Real ws, Real rc, Real ra, Real rs) : na(nAgents), dt(.05), time(0), wCohesion(wc), wAlignment(wa), wSeparation(ws), rCohesion(rc), rAlignment(ra), rSeparation(rs), maxU(2.) { this->init();} void Workspace::init(){ domainsize = 1.0; // Random generator seed srand48(std::time(0)); //Initializing Octree head Real maxR; maxR = (rCohesion > rSeparation) ? rCohesion : rSeparation; maxR = (maxR > rAlignment) ? maxR : rAlignment; oc = *(new Octree(2*maxR,domainsize)); // Initialize agents // This loop may be quite expensive due to random number generation //agents.reserve(na); //#pragma omp parallel //{ //#pragma omp for for(size_t j = 0; j < na; j++){ // Create random position Vector position(drand48(), drand48(), drand48()); // Create random velocity Agent *agt = new Agent(position,Zeros(),Zeros()); oc.add(*agt); } //} /*for(size_t j = 0; j < na; j++){ // Create random position Vector position(drand48(), drand48(), drand48()); // Create random velocity //agents.push_back(Agent(position, Zeros(), Zeros())); //agents.assign(j,Agent(position, Zeros(), Zeros())); oc.add(agents[j]); }*/ /* TODO build the octree */ } Agent *Workspace::tempToArray(TemporaryContainer tp){ //std::cerr << " CPU" << std::endl; Agent *res = (Agent*) malloc(tp.size()*sizeof(Agent)); for(int i =0; i<tp.size(); i++){ res[i]=*tp[i]; //std::cerr << tp[i]->position[Agent::curr_state].x << " CPU" << std::endl; } return res; } void Workspace::arrayToTemp(Agent *agts, int s,TemporaryContainer &leaf){ leaf.clear(); //std::cerr << " GPU" << std::endl; for(int i =0; i<s; i++) { leaf.push_back(&agts[i]); //std::cerr << agts[i].position[Agent::curr_state].x << " GPU" << std::endl; //std::cerr << agts[i].position[1- Agent::curr_state].x << " 1- curr GPU" << std::endl; } } __device__ Vector separation(Agent &a, Agent *agent_list, int sizeNeigh, Real *dist, Real rad, int curr) { Vector force = Vector(); int count =0; for(size_t i = 0; i < sizeNeigh; i++) { //double dist = (a.position[this->curr_state] - agent_list[i]->position[this->curr_state]).norm(); if ((dist[i] < rad) && (0<dist[i])) { // TODO the comparison is no longer needed // //force -= a.position[curr] - agent_list[i].position[curr]; force -= (a.position[curr] - agent_list[i].position[curr]).normalized(); ++count; } } force.x =0.02; return force;//( count >0 ? 
force/count : force); } __device__ Vector cohesion(Agent &a,Agent *agent_list, int sizeNeigh, Real *dist, Real rad, int curr) { Vector force = Vector(); int count = 0; for(size_t i = 0; i < sizeNeigh; i++) { //double dist = (a.position[this->curr_state] - agent_list[i]->position[this->curr_state]).norm(); if ((dist[i] < rad) && (0<dist[i])) { // TODO the comparison is no longer needed // force += agent_list[i].position[curr]; //force += a.position[curr] - agent_list[i].position[curr]; ++count; } } force.x =0.2; return force;//( count >0 ? force/count : force); } __device__ Vector alignment(Agent &a,Agent *agent_list, int sizeNeigh, Real *dist, Real rad, int curr) { Vector force = Vector(); int count = 0; for(size_t i = 0; i < sizeNeigh; i++) { if ((dist[i] < rad) && (0<dist[i])) { // TODO the comparison is no longer needed // force += agent_list[i].velocity[curr]; ++count; } } force.x =0.2; return force;//( count >0 ? force/count : force); } __global__ void computeOnGPU(int sizeNb, int sizeLf, Agent *agts, Agent *neigh, Real rs, Real rc, Real ra, Real wSeparation, Real wCohesion, Real wAlignment, int curr, Real maxU,Real dt){ int tileWidth = (sizeNb/sizeLf < BUFF_SIZE) ? sizeNb/sizeLf : BUFF_SIZE; __shared__ Real ds_neighInst[BUFF_SIZE*sizeof(Agent)/sizeof(Real)];//TODO mettre zero les champs __shared__ Agent *ds_neigh; ds_neigh = (Agent *) ds_neighInst; //ds_neigh = (Agent*) malloc(sizeof(Agent)*(tileWidth)); // TODO Faire gaffe un seul thread __shared__ Real ds_dist[BUFF_SIZE]; //ds_dist = (Real *) malloc(sizeof(Real)*(tileWidth)); Vector s, c, a; for (int j=0; j<sizeLf; j++){ //Chargement mmoire for (int i= 0; (i<tileWidth) && ((blockIdx.x+j)*tileWidth+i<sizeNb); i++){ ds_neigh[i]=neigh[(blockIdx.x+j)*tileWidth+i]; } __syncthreads(); //Calcul des distances for (int i= 0; i<tileWidth && ((blockIdx.x+j)*tileWidth+i<sizeNb); i++){ ds_dist[i]=(agts[blockIdx.x].position[curr]-ds_neigh[(blockIdx.x+j)*tileWidth+i].position[curr]).norm();//TODO passer norm en __global__ } __syncthreads(); //Calcul des forces //s = s += separation(agts[blockIdx.x],ds_neigh,tileWidth, ds_dist, rs, curr); c += cohesion(agts[blockIdx.x],ds_neigh,tileWidth, ds_dist, rc, curr); a += alignment(agts[blockIdx.x],ds_neigh,tileWidth, ds_dist, ra, curr); } agts[blockIdx.x].direction[1-curr] = c*wCohesion + a*wAlignment + s*wSeparation; agts[blockIdx.x].velocity[1-curr] = agts[blockIdx.x].velocity[curr] + agts[blockIdx.x].direction[1-curr]; float speed =agts[blockIdx.x].velocity[1-curr].norm(); if ((speed > maxU)) { agts[blockIdx.x].velocity[1-curr] = agts[blockIdx.x].velocity[1-curr] * maxU/speed; } agts[blockIdx.x].position[1-curr] = agts[blockIdx.x].position[curr] + agts[blockIdx.x].velocity[curr]*dt; __syncthreads(); } void Workspace::move(int step)//TODO erase step (just for tests) { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp,0); int memDisp = deviceProp.totalGlobalMem; int memOccup = 0; Vector s,c,a; LeafContainer leafs = Octree::leafs; TemporaryContainer nb; //std::cout << " leaves "<< Octree::leafs.size() << std::endl; size_t i=0; std::vector<Agent **> parallelLfArray; std::vector<Agent **> d_parallelLfArray; while (i<leafs.size()){ Octree *it=leafs[i]; nb.clear(); (it)->returnNeighboursLeaf(nb); TemporaryContainer agentsleaf = (it)->agents; //Chargement mmoire sur GPU Agent *neighArray=tempToArray(nb); Agent *leafArray=tempToArray(leafs[i]->agents); Agent *d_neighArray; Agent *d_leafArray; //TODO penser supprimer les liste d'agents copies hipMalloc((void 
**)&d_neighArray,sizeof(Agent)*nb.size()); hipError_t err = hipGetLastError(); if(hipSuccess != err ) std::cerr << hipGetErrorString(err) << std::endl; hipMalloc((void **)&d_leafArray,sizeof(Agent)*leafs[i]->agents.size()); if(hipSuccess != err ) std::cerr << hipGetErrorString(err) << std::endl; hipMemcpy(d_neighArray,neighArray,sizeof(Agent)*nb.size(), hipMemcpyHostToDevice); if(hipSuccess != err ) std::cerr << hipGetErrorString(err) << std::endl; hipMemcpy(d_leafArray,leafArray,sizeof(Agent)*leafs[i]->agents.size(), hipMemcpyHostToDevice); if(hipSuccess != err ) std::cerr << hipGetErrorString(err) << std::endl; //Initialiser la grille dim3 dimGrid(leafs[i]->agents.size(),1,1); dim3 dimBlock(1,1,1); hipLaunchKernelGGL(( computeOnGPU), dim3(dimGrid),dim3(dimBlock), 0, 0, nb.size(), leafs[i]->agents.size(), d_leafArray, d_neighArray, rSeparation, rCohesion, rAlignment, wSeparation, wCohesion, wAlignment, Agent::curr_state,maxU,dt); if(hipSuccess != err ) std::cerr << hipGetErrorString(err) << std::endl; // FIN FOR hipDeviceSynchronize(); hipMemcpy(leafArray,d_leafArray,sizeof(Agent)*leafs[i]->agents.size(), hipMemcpyDeviceToHost); arrayToTemp(leafArray,leafs[i]->agents.size(),leafs[i]->agents); hipFree(d_neighArray); //hipFree(neighArray); hipFree(d_leafArray); //hipFree(leafArray); hipDeviceSynchronize(); for(size_t j=0; j<agentsleaf.size(); j++){ Agent *it2=leafs[i]->agents[j]; //if(isnan(fmod(it2->position[1-Agent::curr_state].x,domainsize))) std::cout << "bof" << it2->position[1-Agent::curr_state].x << std::endl; //std::cout << "bofx" << it2->position[1-Agent::curr_state].x << std::endl; //std::cout << "vofx" << it2->velocity[1-Agent::curr_state].x << std::endl; (it2)->position[1-Agent::curr_state].x= fmod((it2)->position[1-Agent::curr_state].x,domainsize); (it2)->position[1-Agent::curr_state].y= fmod((it2)->position[1-Agent::curr_state].y,domainsize); (it2)->position[1-Agent::curr_state].z= fmod((it2)->position[1-Agent::curr_state].z,domainsize); } } Agent::curr_state = 1 - Agent::curr_state; //std::cerr << "ok1" << std::endl; update(); //std::cerr << "ok2" << std::endl; i++;//TODO A changer retirer } void Workspace::returnNeighboursBuffer(TemporaryContainer &nb, Agent *agent, Real rc, TemporaryContainer &bufC, Real ra, TemporaryContainer &bufA, Real rs, TemporaryContainer &bufS ){ for(int i=0; i<nb.size(); i++){ Real dist = (agent->position[Agent::curr_state] - nb[i]->position[Agent::curr_state]).norm(); if(dist <= rc) bufC.push_back(agent); if(dist <= ra) bufA.push_back(agent); if(dist <= rs) bufS.push_back(agent); } } void Workspace::update(){ //#pragma omp parallel for LeafContainer leafs = Octree::leafs; for (size_t i=0; i<leafs.size(); i++){ Octree *lf=leafs[i]; for (size_t j = 0; j < lf->agents.size(); j++){ Agent *ag = lf->agents[j]; //std::cerr << "test3 " << ag->position[Agent::curr_state] << " " << lf->position << std::endl; if (!(lf->agents[j]->position[Agent::curr_state] > lf->position) || !((lf->position + Vector(1,1,1)*lf->width)>= lf->agents[j]->position[Agent::curr_state] )){ lf->agents.erase(std::find(lf->agents.begin(), lf->agents.end(), lf->agents[j])); //lf->agents.remove(&agents[k]); oc.add(*ag);//*lf->agents[j]); lf->delete_leaves(); //std::cerr << "test4" << std::endl; } else { //std::cerr << "test5" << std::endl; lf->agents[j]->leaf[Agent::curr_state]=lf; } } //std::cerr << "test5" << std::endl; } /*for(size_t k = 0; k< na; k++){ if (Agent::curr_state) Octree *lf = agents[k].leaf[1-Agent::curr_state]; //Retirer de la liste si ncessaire et rajouter au bon 
endroit if((lf->position > agents[k].position[Agent::curr_state]) || (agents[k].position[Agent::curr_state] >= (lf->position + Vector(1,1,1)*lf->width))) { lf->agents.erase(std::find(lf->agents.begin(), lf->agents.end(), &agents[k])); //lf->agents.remove(&agents[k]); lf->delete_leaves(); oc.add(agents[k]); } else { agents[k].leaf[Agent::curr_state]=lf; } }*/ } void Workspace::simulate(int nsteps) { // store initial position[Agent::curr_state]s save(0); // perform nsteps time steps of the simulation int step = 0; while (step++ < nsteps) { //std::cout << "coco" << step << std::endl; this->move(step); //std::cerr << "ok3" << std::endl; //tst.printOctree(& this->oc); // store every 20 steps if (step%5 == 0) save(step); } } void Workspace::save(int stepid) { std::ofstream myfile; LeafContainer leafs=Octree::leafs; //std::cerr << "ok4" << std::endl; myfile.open("boids.xyz", stepid==0 ? std::ios::out : std::ios::app); //std::cerr << "ok4" << std::endl; myfile << std::endl; myfile << na << std::endl; for (size_t i=0; i<leafs.size(); i++){ Octree *lf=leafs[i]; for (size_t j = 0; j < lf->agents.size(); j++) myfile << "B " << lf->agents[j]->position[Agent::curr_state]; } myfile.close(); }
d7262cc0414b9d8e0d22794a84776d349232cbd4.cu
#include <cmath> #include <iostream> #include <fstream> #include <ctime> #include <omp.h> #include "workspace.hxx" #include "agent.cuh" #include "vector.cuh" #include "tester.hxx" //#include "octree.hxx" #include <cuda.h> #define BUFF_SIZE 20 Workspace::Workspace(ArgumentParser &parser) { na = parser("agents").asInt(); wCohesion = parser("wc").asFloat(); wAlignment = parser("wa").asFloat(); wSeparation = parser("ws").asFloat(); rCohesion = parser("rc").asFloat(); rAlignment = parser("ra").asFloat(); rSeparation = parser("rs").asFloat(); dt= 0.05; maxU = 2.0; time = 0.,//; this->init();} Workspace::Workspace(size_t nAgents, Real wc, Real wa, Real ws, Real rc, Real ra, Real rs) : na(nAgents), dt(.05), time(0), wCohesion(wc), wAlignment(wa), wSeparation(ws), rCohesion(rc), rAlignment(ra), rSeparation(rs), maxU(2.) { this->init();} void Workspace::init(){ domainsize = 1.0; // Random generator seed srand48(std::time(0)); //Initializing Octree head Real maxR; maxR = (rCohesion > rSeparation) ? rCohesion : rSeparation; maxR = (maxR > rAlignment) ? maxR : rAlignment; oc = *(new Octree(2*maxR,domainsize)); // Initialize agents // This loop may be quite expensive due to random number generation //agents.reserve(na); //#pragma omp parallel //{ //#pragma omp for for(size_t j = 0; j < na; j++){ // Create random position Vector position(drand48(), drand48(), drand48()); // Create random velocity Agent *agt = new Agent(position,Zeros(),Zeros()); oc.add(*agt); } //} /*for(size_t j = 0; j < na; j++){ // Create random position Vector position(drand48(), drand48(), drand48()); // Create random velocity //agents.push_back(Agent(position, Zeros(), Zeros())); //agents.assign(j,Agent(position, Zeros(), Zeros())); oc.add(agents[j]); }*/ /* TODO build the octree */ } Agent *Workspace::tempToArray(TemporaryContainer tp){ //std::cerr << " CPU" << std::endl; Agent *res = (Agent*) malloc(tp.size()*sizeof(Agent)); for(int i =0; i<tp.size(); i++){ res[i]=*tp[i]; //std::cerr << tp[i]->position[Agent::curr_state].x << " CPU" << std::endl; } return res; } void Workspace::arrayToTemp(Agent *agts, int s,TemporaryContainer &leaf){ leaf.clear(); //std::cerr << " GPU" << std::endl; for(int i =0; i<s; i++) { leaf.push_back(&agts[i]); //std::cerr << agts[i].position[Agent::curr_state].x << " GPU" << std::endl; //std::cerr << agts[i].position[1- Agent::curr_state].x << " 1- curr GPU" << std::endl; } } __device__ Vector separation(Agent &a, Agent *agent_list, int sizeNeigh, Real *dist, Real rad, int curr) { Vector force = Vector(); int count =0; for(size_t i = 0; i < sizeNeigh; i++) { //double dist = (a.position[this->curr_state] - agent_list[i]->position[this->curr_state]).norm(); if ((dist[i] < rad) && (0<dist[i])) { // TODO the comparison is no longer needed // //force -= a.position[curr] - agent_list[i].position[curr]; force -= (a.position[curr] - agent_list[i].position[curr]).normalized(); ++count; } } force.x =0.02; return force;//( count >0 ? force/count : force); } __device__ Vector cohesion(Agent &a,Agent *agent_list, int sizeNeigh, Real *dist, Real rad, int curr) { Vector force = Vector(); int count = 0; for(size_t i = 0; i < sizeNeigh; i++) { //double dist = (a.position[this->curr_state] - agent_list[i]->position[this->curr_state]).norm(); if ((dist[i] < rad) && (0<dist[i])) { // TODO the comparison is no longer needed // force += agent_list[i].position[curr]; //force += a.position[curr] - agent_list[i].position[curr]; ++count; } } force.x =0.2; return force;//( count >0 ? 
force/count : force); } __device__ Vector alignment(Agent &a,Agent *agent_list, int sizeNeigh, Real *dist, Real rad, int curr) { Vector force = Vector(); int count = 0; for(size_t i = 0; i < sizeNeigh; i++) { if ((dist[i] < rad) && (0<dist[i])) { // TODO the comparison is no longer needed // force += agent_list[i].velocity[curr]; ++count; } } force.x =0.2; return force;//( count >0 ? force/count : force); } __global__ void computeOnGPU(int sizeNb, int sizeLf, Agent *agts, Agent *neigh, Real rs, Real rc, Real ra, Real wSeparation, Real wCohesion, Real wAlignment, int curr, Real maxU,Real dt){ int tileWidth = (sizeNb/sizeLf < BUFF_SIZE) ? sizeNb/sizeLf : BUFF_SIZE; __shared__ Real ds_neighInst[BUFF_SIZE*sizeof(Agent)/sizeof(Real)];//TODO mettre à zero les champs __shared__ Agent *ds_neigh; ds_neigh = (Agent *) ds_neighInst; //ds_neigh = (Agent*) malloc(sizeof(Agent)*(tileWidth)); // TODO Faire gaffe un seul thread __shared__ Real ds_dist[BUFF_SIZE]; //ds_dist = (Real *) malloc(sizeof(Real)*(tileWidth)); Vector s, c, a; for (int j=0; j<sizeLf; j++){ //Chargement mémoire for (int i= 0; (i<tileWidth) && ((blockIdx.x+j)*tileWidth+i<sizeNb); i++){ ds_neigh[i]=neigh[(blockIdx.x+j)*tileWidth+i]; } __syncthreads(); //Calcul des distances for (int i= 0; i<tileWidth && ((blockIdx.x+j)*tileWidth+i<sizeNb); i++){ ds_dist[i]=(agts[blockIdx.x].position[curr]-ds_neigh[(blockIdx.x+j)*tileWidth+i].position[curr]).norm();//TODO passer norm en __global__ } __syncthreads(); //Calcul des forces //s = s += separation(agts[blockIdx.x],ds_neigh,tileWidth, ds_dist, rs, curr); c += cohesion(agts[blockIdx.x],ds_neigh,tileWidth, ds_dist, rc, curr); a += alignment(agts[blockIdx.x],ds_neigh,tileWidth, ds_dist, ra, curr); } agts[blockIdx.x].direction[1-curr] = c*wCohesion + a*wAlignment + s*wSeparation; agts[blockIdx.x].velocity[1-curr] = agts[blockIdx.x].velocity[curr] + agts[blockIdx.x].direction[1-curr]; float speed =agts[blockIdx.x].velocity[1-curr].norm(); if ((speed > maxU)) { agts[blockIdx.x].velocity[1-curr] = agts[blockIdx.x].velocity[1-curr] * maxU/speed; } agts[blockIdx.x].position[1-curr] = agts[blockIdx.x].position[curr] + agts[blockIdx.x].velocity[curr]*dt; __syncthreads(); } void Workspace::move(int step)//TODO erase step (just for tests) { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp,0); int memDisp = deviceProp.totalGlobalMem; int memOccup = 0; Vector s,c,a; LeafContainer leafs = Octree::leafs; TemporaryContainer nb; //std::cout << " leaves "<< Octree::leafs.size() << std::endl; size_t i=0; std::vector<Agent **> parallelLfArray; std::vector<Agent **> d_parallelLfArray; while (i<leafs.size()){ Octree *it=leafs[i]; nb.clear(); (it)->returnNeighboursLeaf(nb); TemporaryContainer agentsleaf = (it)->agents; //Chargement mémoire sur GPU Agent *neighArray=tempToArray(nb); Agent *leafArray=tempToArray(leafs[i]->agents); Agent *d_neighArray; Agent *d_leafArray; //TODO penser à supprimer les liste d'agents copiées cudaMalloc((void **)&d_neighArray,sizeof(Agent)*nb.size()); cudaError err = cudaGetLastError(); if(cudaSuccess != err ) std::cerr << cudaGetErrorString(err) << std::endl; cudaMalloc((void **)&d_leafArray,sizeof(Agent)*leafs[i]->agents.size()); if(cudaSuccess != err ) std::cerr << cudaGetErrorString(err) << std::endl; cudaMemcpy(d_neighArray,neighArray,sizeof(Agent)*nb.size(), cudaMemcpyHostToDevice); if(cudaSuccess != err ) std::cerr << cudaGetErrorString(err) << std::endl; cudaMemcpy(d_leafArray,leafArray,sizeof(Agent)*leafs[i]->agents.size(), cudaMemcpyHostToDevice); if(cudaSuccess != 
err ) std::cerr << cudaGetErrorString(err) << std::endl; //Initialiser la grille dim3 dimGrid(leafs[i]->agents.size(),1,1); dim3 dimBlock(1,1,1); computeOnGPU<<<dimGrid,dimBlock>>>(nb.size(), leafs[i]->agents.size(), d_leafArray, d_neighArray, rSeparation, rCohesion, rAlignment, wSeparation, wCohesion, wAlignment, Agent::curr_state,maxU,dt); if(cudaSuccess != err ) std::cerr << cudaGetErrorString(err) << std::endl; // FIN FOR cudaThreadSynchronize(); cudaMemcpy(leafArray,d_leafArray,sizeof(Agent)*leafs[i]->agents.size(), cudaMemcpyDeviceToHost); arrayToTemp(leafArray,leafs[i]->agents.size(),leafs[i]->agents); cudaFree(d_neighArray); //cudaFree(neighArray); cudaFree(d_leafArray); //cudaFree(leafArray); cudaThreadSynchronize(); for(size_t j=0; j<agentsleaf.size(); j++){ Agent *it2=leafs[i]->agents[j]; //if(isnan(fmod(it2->position[1-Agent::curr_state].x,domainsize))) std::cout << "bof" << it2->position[1-Agent::curr_state].x << std::endl; //std::cout << "bofx" << it2->position[1-Agent::curr_state].x << std::endl; //std::cout << "vofx" << it2->velocity[1-Agent::curr_state].x << std::endl; (it2)->position[1-Agent::curr_state].x= fmod((it2)->position[1-Agent::curr_state].x,domainsize); (it2)->position[1-Agent::curr_state].y= fmod((it2)->position[1-Agent::curr_state].y,domainsize); (it2)->position[1-Agent::curr_state].z= fmod((it2)->position[1-Agent::curr_state].z,domainsize); } } Agent::curr_state = 1 - Agent::curr_state; //std::cerr << "ok1" << std::endl; update(); //std::cerr << "ok2" << std::endl; i++;//TODO A changer retirer } void Workspace::returnNeighboursBuffer(TemporaryContainer &nb, Agent *agent, Real rc, TemporaryContainer &bufC, Real ra, TemporaryContainer &bufA, Real rs, TemporaryContainer &bufS ){ for(int i=0; i<nb.size(); i++){ Real dist = (agent->position[Agent::curr_state] - nb[i]->position[Agent::curr_state]).norm(); if(dist <= rc) bufC.push_back(agent); if(dist <= ra) bufA.push_back(agent); if(dist <= rs) bufS.push_back(agent); } } void Workspace::update(){ //#pragma omp parallel for LeafContainer leafs = Octree::leafs; for (size_t i=0; i<leafs.size(); i++){ Octree *lf=leafs[i]; for (size_t j = 0; j < lf->agents.size(); j++){ Agent *ag = lf->agents[j]; //std::cerr << "test3 " << ag->position[Agent::curr_state] << " " << lf->position << std::endl; if (!(lf->agents[j]->position[Agent::curr_state] > lf->position) || !((lf->position + Vector(1,1,1)*lf->width)>= lf->agents[j]->position[Agent::curr_state] )){ lf->agents.erase(std::find(lf->agents.begin(), lf->agents.end(), lf->agents[j])); //lf->agents.remove(&agents[k]); oc.add(*ag);//*lf->agents[j]); lf->delete_leaves(); //std::cerr << "test4" << std::endl; } else { //std::cerr << "test5" << std::endl; lf->agents[j]->leaf[Agent::curr_state]=lf; } } //std::cerr << "test5" << std::endl; } /*for(size_t k = 0; k< na; k++){ if (Agent::curr_state) Octree *lf = agents[k].leaf[1-Agent::curr_state]; //Retirer de la liste si nécessaire et rajouter au bon endroit if((lf->position > agents[k].position[Agent::curr_state]) || (agents[k].position[Agent::curr_state] >= (lf->position + Vector(1,1,1)*lf->width))) { lf->agents.erase(std::find(lf->agents.begin(), lf->agents.end(), &agents[k])); //lf->agents.remove(&agents[k]); lf->delete_leaves(); oc.add(agents[k]); } else { agents[k].leaf[Agent::curr_state]=lf; } }*/ } void Workspace::simulate(int nsteps) { // store initial position[Agent::curr_state]s save(0); // perform nsteps time steps of the simulation int step = 0; while (step++ < nsteps) { //std::cout << "coco" << step << std::endl; 
this->move(step); //std::cerr << "ok3" << std::endl; //tst.printOctree(& this->oc); // store every 20 steps if (step%5 == 0) save(step); } } void Workspace::save(int stepid) { std::ofstream myfile; LeafContainer leafs=Octree::leafs; //std::cerr << "ok4" << std::endl; myfile.open("boids.xyz", stepid==0 ? std::ios::out : std::ios::app); //std::cerr << "ok4" << std::endl; myfile << std::endl; myfile << na << std::endl; for (size_t i=0; i<leafs.size(); i++){ Octree *lf=leafs[i]; for (size_t j = 0; j < lf->agents.size(); j++) myfile << "B " << lf->agents[j]->position[Agent::curr_state]; } myfile.close(); }
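In Workspace::move above, cudaGetLastError() is read into err once, right after the first cudaMalloc, and that same stale value is re-tested after every later call, so later failures would go unnoticed. A common alternative is a small checking macro evaluated around each runtime call; a minimal sketch follows (the CUDA_CHECK name is hypothetical and not part of this repository):

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Wrap every CUDA runtime call; report file/line and abort on failure.
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            std::fprintf(stderr, "CUDA error %s at %s:%d\n",                  \
                         cudaGetErrorString(err_), __FILE__, __LINE__);       \
            std::exit(EXIT_FAILURE);                                          \
        }                                                                     \
    } while (0)

// Kernel launches do not return a cudaError_t, so they are checked separately:
//   kernel<<<grid, block>>>(...);
//   CUDA_CHECK(cudaGetLastError());        // launch-time errors
//   CUDA_CHECK(cudaDeviceSynchronize());   // execution-time errors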
e690781508a15f65075eb4b8ad0f71185f8e09ef.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) 2017 Darius Rckert * Licensed under the MIT License. * See LICENSE file for more information. */ #include "saiga/cuda/cudaHelper.h" #include "saiga/cuda/reduce.h" #include "saiga/cuda/tests/test.h" #include "saiga/cuda/tests/test_helper.h" #include "saiga/cuda/thread_info.h" #include "saiga/core/time/timer.h" namespace Saiga { namespace CUDA { // Note: // batchreduce2 is always faster or same speed as batchreduce. // the compiler can unroll the load loop in batchreduce2 but not in batchreduce // for large Ns the compiler partially unrolls the loop (~8 iterations) // nvcc $CPPFLAGS -ptx -gencode=arch=compute_52,code=compute_52 -g -std=c++11 --expt-relaxed-constexpr // warpStrideLoop_test.cu template <typename T, unsigned int BLOCK_SIZE, unsigned int LOCAL_WARP_SIZE, int N> __global__ static void batchReduce(ArrayView<T> in, ArrayView<T> out) { ThreadInfo<BLOCK_SIZE, LOCAL_WARP_SIZE> ti; if (ti.warp_id >= out.size()) return; int inoffset = ti.warp_id * N; int outoffset = ti.warp_id; T sum = 0; for (int i = ti.lane_id; i < N; i += LOCAL_WARP_SIZE) { sum += in[i + inoffset]; } sum = warpReduceSum<T, LOCAL_WARP_SIZE>(sum); if (ti.lane_id == 0) { out[outoffset] = sum; } } template <typename T, unsigned int BLOCK_SIZE, unsigned int LOCAL_WARP_SIZE, int N> __global__ static void batchReduce2(ArrayView<T> in, ArrayView<T> out) { ThreadInfo<BLOCK_SIZE, LOCAL_WARP_SIZE> ti; if (ti.warp_id >= out.size()) return; int inoffset = ti.warp_id * N; int outoffset = ti.warp_id; T sum = 0; // for(int k = 0, i = ti.lane_id; k < iDivUp(N,LOCAL_WARP_SIZE) ; ++k, i+=LOCAL_WARP_SIZE){ // if(i < N){ // sum += in[i+inoffset]; // } // } WARP_FOR(i, ti.lane_id, N, LOCAL_WARP_SIZE) { sum += in[i + inoffset]; } sum = warpReduceSum<T, LOCAL_WARP_SIZE>(sum); if (ti.lane_id == 0) { out[outoffset] = sum; } } #if 0 __global__ static void shflTest(){ int tid = threadIdx.x; float value = tid + 0.1f; int* ivalue = reinterpret_cast<int*>(&value); int ix = __shfl(ivalue[0],5,32); int iy = __shfl_sync(ivalue[0],5,32); float x = reinterpret_cast<float*>(&ix)[0]; float y = reinterpret_cast<float*>(&iy)[0]; if(tid == 0){ printf("shfl tmp %d %d\n",ix,iy); printf("shfl final %f %f\n",x,y); } } #endif template <int N> void warpStrideLoopTest2() { #if 0 { hipLaunchKernelGGL(( shflTest), dim3(1),dim3(32), 0, 0, ); CUDA_SYNC_CHECK_ERROR(); return; } #endif using ReduceType = int; // const int N = 32 * 100; int numEles = 1000 * 1000 * 10; // int numEles = 1000000; const int K = iDivUp(numEles, N); size_t readWrites = K * N * sizeof(ReduceType) + K * sizeof(ReduceType); CUDA::PerformanceTestHelper pth("Batch Reduce Sum N=" + std::to_string(N), readWrites); thrust::device_vector<ReduceType> in(N * K, 1); thrust::device_vector<ReduceType> out(K, 0); thrust::host_vector<ReduceType> hin = in; thrust::host_vector<ReduceType> hout = out; { float time; { ScopedTimer<float> t(&time); for (int k = 0; k < K; ++k) { int res = 0; for (int i = 0; i < N; ++i) { res += hin[i + k * N]; } hout[k] = res; } } pth.addMeassurement("CPU reduce", time); } { const int blockSize = 128; const int LOCAL_WARP_SIZE = 1; auto numBlocks = iDivUp(K * LOCAL_WARP_SIZE, blockSize); float time; { CUDA::CudaScopedTimer t2(time); hipLaunchKernelGGL(( batchReduce<ReduceType, blockSize, LOCAL_WARP_SIZE, N>), dim3(numBlocks), dim3(blockSize), 0, 0, in, out); } pth.addMeassurement("batch reduce warpsize = 1", time); SAIGA_ASSERT(out == hout); } { const int blockSize = 128; 
const int LOCAL_WARP_SIZE = 32; auto numBlocks = iDivUp(K * LOCAL_WARP_SIZE, blockSize); float time; { CUDA::CudaScopedTimer t2(time); hipLaunchKernelGGL(( batchReduce<ReduceType, blockSize, LOCAL_WARP_SIZE, N>), dim3(numBlocks), dim3(blockSize), 0, 0, in, out); } pth.addMeassurement("batch reduce 1", time); SAIGA_ASSERT(out == hout); } { const int blockSize = 128; const int LOCAL_WARP_SIZE = 32; auto numBlocks = iDivUp(K * LOCAL_WARP_SIZE, blockSize); float time; { CUDA::CudaScopedTimer t2(time); hipLaunchKernelGGL(( batchReduce2<ReduceType, blockSize, LOCAL_WARP_SIZE, N>), dim3(numBlocks), dim3(blockSize), 0, 0, in, out); } pth.addMeassurement("batch reduce 2", time); SAIGA_ASSERT(out == hout); } CUDA_SYNC_CHECK_ERROR(); } void warpStrideLoopTest() { warpStrideLoopTest2<57>(); warpStrideLoopTest2<320>(); warpStrideLoopTest2<1252>(); warpStrideLoopTest2<19276>(); } } // namespace CUDA } // namespace Saiga
e690781508a15f65075eb4b8ad0f71185f8e09ef.cu
/** * Copyright (c) 2017 Darius Rückert * Licensed under the MIT License. * See LICENSE file for more information. */ #include "saiga/cuda/cudaHelper.h" #include "saiga/cuda/reduce.h" #include "saiga/cuda/tests/test.h" #include "saiga/cuda/tests/test_helper.h" #include "saiga/cuda/thread_info.h" #include "saiga/core/time/timer.h" namespace Saiga { namespace CUDA { // Note: // batchreduce2 is always faster or same speed as batchreduce. // the compiler can unroll the load loop in batchreduce2 but not in batchreduce // for large Ns the compiler partially unrolls the loop (~8 iterations) // nvcc $CPPFLAGS -ptx -gencode=arch=compute_52,code=compute_52 -g -std=c++11 --expt-relaxed-constexpr // warpStrideLoop_test.cu template <typename T, unsigned int BLOCK_SIZE, unsigned int LOCAL_WARP_SIZE, int N> __global__ static void batchReduce(ArrayView<T> in, ArrayView<T> out) { ThreadInfo<BLOCK_SIZE, LOCAL_WARP_SIZE> ti; if (ti.warp_id >= out.size()) return; int inoffset = ti.warp_id * N; int outoffset = ti.warp_id; T sum = 0; for (int i = ti.lane_id; i < N; i += LOCAL_WARP_SIZE) { sum += in[i + inoffset]; } sum = warpReduceSum<T, LOCAL_WARP_SIZE>(sum); if (ti.lane_id == 0) { out[outoffset] = sum; } } template <typename T, unsigned int BLOCK_SIZE, unsigned int LOCAL_WARP_SIZE, int N> __global__ static void batchReduce2(ArrayView<T> in, ArrayView<T> out) { ThreadInfo<BLOCK_SIZE, LOCAL_WARP_SIZE> ti; if (ti.warp_id >= out.size()) return; int inoffset = ti.warp_id * N; int outoffset = ti.warp_id; T sum = 0; // for(int k = 0, i = ti.lane_id; k < iDivUp(N,LOCAL_WARP_SIZE) ; ++k, i+=LOCAL_WARP_SIZE){ // if(i < N){ // sum += in[i+inoffset]; // } // } WARP_FOR(i, ti.lane_id, N, LOCAL_WARP_SIZE) { sum += in[i + inoffset]; } sum = warpReduceSum<T, LOCAL_WARP_SIZE>(sum); if (ti.lane_id == 0) { out[outoffset] = sum; } } #if 0 __global__ static void shflTest(){ int tid = threadIdx.x; float value = tid + 0.1f; int* ivalue = reinterpret_cast<int*>(&value); int ix = __shfl(ivalue[0],5,32); int iy = __shfl_sync(ivalue[0],5,32); float x = reinterpret_cast<float*>(&ix)[0]; float y = reinterpret_cast<float*>(&iy)[0]; if(tid == 0){ printf("shfl tmp %d %d\n",ix,iy); printf("shfl final %f %f\n",x,y); } } #endif template <int N> void warpStrideLoopTest2() { #if 0 { shflTest<<<1,32>>>(); CUDA_SYNC_CHECK_ERROR(); return; } #endif using ReduceType = int; // const int N = 32 * 100; int numEles = 1000 * 1000 * 10; // int numEles = 1000000; const int K = iDivUp(numEles, N); size_t readWrites = K * N * sizeof(ReduceType) + K * sizeof(ReduceType); CUDA::PerformanceTestHelper pth("Batch Reduce Sum N=" + std::to_string(N), readWrites); thrust::device_vector<ReduceType> in(N * K, 1); thrust::device_vector<ReduceType> out(K, 0); thrust::host_vector<ReduceType> hin = in; thrust::host_vector<ReduceType> hout = out; { float time; { ScopedTimer<float> t(&time); for (int k = 0; k < K; ++k) { int res = 0; for (int i = 0; i < N; ++i) { res += hin[i + k * N]; } hout[k] = res; } } pth.addMeassurement("CPU reduce", time); } { const int blockSize = 128; const int LOCAL_WARP_SIZE = 1; auto numBlocks = iDivUp(K * LOCAL_WARP_SIZE, blockSize); float time; { CUDA::CudaScopedTimer t2(time); batchReduce<ReduceType, blockSize, LOCAL_WARP_SIZE, N><<<numBlocks, blockSize>>>(in, out); } pth.addMeassurement("batch reduce warpsize = 1", time); SAIGA_ASSERT(out == hout); } { const int blockSize = 128; const int LOCAL_WARP_SIZE = 32; auto numBlocks = iDivUp(K * LOCAL_WARP_SIZE, blockSize); float time; { CUDA::CudaScopedTimer t2(time); batchReduce<ReduceType, 
blockSize, LOCAL_WARP_SIZE, N><<<numBlocks, blockSize>>>(in, out); } pth.addMeassurement("batch reduce 1", time); SAIGA_ASSERT(out == hout); } { const int blockSize = 128; const int LOCAL_WARP_SIZE = 32; auto numBlocks = iDivUp(K * LOCAL_WARP_SIZE, blockSize); float time; { CUDA::CudaScopedTimer t2(time); batchReduce2<ReduceType, blockSize, LOCAL_WARP_SIZE, N><<<numBlocks, blockSize>>>(in, out); } pth.addMeassurement("batch reduce 2", time); SAIGA_ASSERT(out == hout); } CUDA_SYNC_CHECK_ERROR(); } void warpStrideLoopTest() { warpStrideLoopTest2<57>(); warpStrideLoopTest2<320>(); warpStrideLoopTest2<1252>(); warpStrideLoopTest2<19276>(); } } // namespace CUDA } // namespace Saiga
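The batchReduce kernels above rely on warpReduceSum from saiga/cuda/reduce.h, which is not reproduced in this entry. The usual shuffle-based shape of such a reduction is sketched below; this is an assumption about its structure for illustration, not Saiga's actual implementation, and it assumes all lanes of the warp are active (full 0xffffffff mask):

#include <cuda_runtime.h>

// Tree reduction across a (sub-)warp using register shuffles.
// LOCAL_WARP_SIZE must be a power of two and <= 32.
template <typename T, unsigned int LOCAL_WARP_SIZE = 32>
__device__ inline T warpReduceSumSketch(T val) {
#pragma unroll
    for (unsigned int offset = LOCAL_WARP_SIZE / 2; offset > 0; offset /= 2) {
        val += __shfl_down_sync(0xffffffff, val, offset, LOCAL_WARP_SIZE);
    }
    return val;  // lane 0 of each LOCAL_WARP_SIZE group holds the group sum
}

With LOCAL_WARP_SIZE = 1 the loop body never executes, which matches the "warpsize = 1" benchmark variant where each thread reduces its batch alone.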
2f97c0c5ff341b916023f053e7b5e4f909329673.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> void f(int *A, int *B) { for(int i = 0; i < 5; i++) A[i] = 10; } __global__ void fCuKernel(int *A, int *B) { for(int i = 0; i < 5; i++) A[threadIdx.x] = 10; } #define CHECK(r) {_check((r), __LINE__);} void _check(hipError_t r, int line) { if (r != hipSuccess) { printf("CUDA error on line %d: %s\n", line, hipGetErrorString(r)); exit(0); } } void print_arr(int *A) { for(int i = 0; i < 5; i++) printf("%d := %d\n", i, A[i]); } int main() { int *A = NULL; int *B = NULL; const int ManagedMemory = 1; printf("*** ManagedMemory: %d\n", ManagedMemory); if (ManagedMemory) { printf("CudaMallocManaged-allocating A & B...\n"); CHECK(hipMallocManaged(&A, sizeof(int) * 5, hipMemAttachGlobal)); printf("A allocated...\n"); CHECK(hipMallocManaged(&B, sizeof(int) * 5, hipMemAttachGlobal)); printf("B allocated...\n"); } else { A = (int *)malloc(sizeof(int) * 5); B = (int *)malloc(sizeof(int) * 5); } assert(A != NULL); assert(B != NULL); for(int i = 0; i < 5; i++) { B[i] = i + 1; A[i] = -42; } printf("launching kernel...\n"); // f(A, B); hipLaunchKernelGGL(( fCuKernel), dim3(1),dim3(5), 0, 0, A, B); hipDeviceSynchronize(); printf("printing A...\n"); print_arr(A); return 0; }
2f97c0c5ff341b916023f053e7b5e4f909329673.cu
#include <stdio.h> #include <stdlib.h> #include <assert.h> #include <cuda.h> #include <cuda_runtime.h> void f(int *A, int *B) { for(int i = 0; i < 5; i++) A[i] = 10; } __global__ void fCuKernel(int *A, int *B) { for(int i = 0; i < 5; i++) A[threadIdx.x] = 10; } #define CHECK(r) {_check((r), __LINE__);} void _check(cudaError_t r, int line) { if (r != cudaSuccess) { printf("CUDA error on line %d: %s\n", line, cudaGetErrorString(r)); exit(0); } } void print_arr(int *A) { for(int i = 0; i < 5; i++) printf("%d := %d\n", i, A[i]); } int main() { int *A = NULL; int *B = NULL; const int ManagedMemory = 1; printf("*** ManagedMemory: %d\n", ManagedMemory); if (ManagedMemory) { printf("CudaMallocManaged-allocating A & B...\n"); CHECK(cudaMallocManaged(&A, sizeof(int) * 5, cudaMemAttachGlobal)); printf("A allocated...\n"); CHECK(cudaMallocManaged(&B, sizeof(int) * 5, cudaMemAttachGlobal)); printf("B allocated...\n"); } else { A = (int *)malloc(sizeof(int) * 5); B = (int *)malloc(sizeof(int) * 5); } assert(A != NULL); assert(B != NULL); for(int i = 0; i < 5; i++) { B[i] = i + 1; A[i] = -42; } printf("launching kernel...\n"); // f(A, B); fCuKernel<<<1,5>>>(A, B); cudaDeviceSynchronize(); printf("printing A...\n"); print_arr(A); return 0; }
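In the ManagedMemory == 0 branch of the entry above, plain malloc pointers would be handed to the kernel, which only works for managed (or otherwise device-visible) allocations; with ordinary host memory the conventional flow uses explicit device buffers plus cudaMemcpy. A rough standalone sketch of that variant (fill10 and the CHECK macro here are illustrative stand-ins, not the original file's code):

#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

// Same element-wise fill as fCuKernel in the entry above.
__global__ void fill10(int *A) { A[threadIdx.x] = 10; }

#define CHECK(r) { cudaError_t e_ = (r); if (e_ != cudaSuccess) { \
    printf("CUDA error: %s\n", cudaGetErrorString(e_)); exit(0); } }

int main() {
    int hA[5];
    for (int i = 0; i < 5; i++) hA[i] = -42;

    int *dA = NULL;
    CHECK(cudaMalloc(&dA, sizeof(hA)));
    CHECK(cudaMemcpy(dA, hA, sizeof(hA), cudaMemcpyHostToDevice));

    fill10<<<1, 5>>>(dA);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(hA, dA, sizeof(hA), cudaMemcpyDeviceToHost));

    for (int i = 0; i < 5; i++) printf("%d := %d\n", i, hA[i]);  // all 10
    CHECK(cudaFree(dA));
    return 0;
}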
90cef8ee10b5757192b8d4abb93799477b5fe126.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/hip/IndexKernel.h> #include <ATen/native/IndexKernel.h> #include <type_traits> #include <ATen/core/TensorBase.h> #include <ATen/Dispatch.h> #include <ATen/core/Array.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/cub.h> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/OffsetCalculator.cuh> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/KernelUtils.cuh> #include <ATen/native/quantized/IndexKernel.h> #include <c10/core/Scalar.h> namespace at::native { static constexpr int launch_bound2 = 4; static constexpr int launch_size_nd = 128; template<int nt, int vt, typename func_t> C10_LAUNCH_BOUNDS_2(nt, launch_bound2) __global__ void index_elementwise_kernel(int N, func_t f) { int tid = threadIdx.x; int nv = nt * vt; int idx = nv * blockIdx.x + tid; #pragma unroll for (int i = 0; i < vt; i++) { if (idx < N) { f(idx); idx += nt; } } } template<int nt, int vt, typename func_t> static void launch_kernel(int64_t N, const func_t& f) { TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits<int32_t>::max()); if (N == 0) { return; } dim3 block(nt); dim3 grid((N + block.x * vt - 1) / (block.x * vt)); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( index_elementwise_kernel<nt, vt, func_t>), dim3(grid), dim3(block), 0, stream, N, f); C10_HIP_KERNEL_LAUNCH_CHECK(); } template <typename func_t> void gpu_index_kernel(TensorIteratorBase& iter, IntArrayRef index_size, IntArrayRef index_stride, const func_t& f) { int num_indices = index_size.size(); AT_ASSERT(static_cast<size_t>(num_indices) == index_stride.size()); AT_ASSERT(num_indices == iter.ntensors() - 2); if (iter.numel() == 0) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { gpu_index_kernel(sub_iter, index_size, index_stride, f); } return; } auto sizes = at::detail::Array<int64_t, MAX_DIMS>(0); auto strides = at::detail::Array<int64_t, MAX_DIMS>(0); auto index_ptrs = at::detail::Array<char*, MAX_DIMS>(nullptr); for (int i = 0; i < num_indices; i++) { sizes[i] = index_size[i]; strides[i] = index_stride[i]; index_ptrs[i] = (char*)iter.data_ptr(i + 2); } char* out_ptr = (char*)iter.data_ptr(0); char* in_ptr = (char*)iter.data_ptr(1); auto offset_calc = make_offset_calculator<3>(iter); launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), [=]__device__(int idx) { auto offsets = offset_calc.get(idx); char* out_data = out_ptr + offsets[0]; char* in_data = in_ptr + offsets[1]; int64_t offset = 0; #pragma unroll for (int i = 0; i < num_indices; i++) { int64_t index = *(int64_t*)(index_ptrs[i] + offsets[2]); CUDA_KERNEL_ASSERT(index >= -sizes[i] && index < sizes[i] && "index out of bounds"); if (index < 0) { index += sizes[i]; } offset += index * strides[i]; } f(out_data, in_data, offset); }); } // The kernels are templated on an opaque, self-aligned type of the correct // size to avoid redundant kernels for different types of the same size. 
template <int N> struct alignas(N) OpaqueType { char data[N]; }; template <typename scalar_t> void index_fill_kernel_impl( TensorIterator& iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride, scalar_t fill_val) { if (0 == iter.numel()) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { index_fill_kernel_impl(sub_iter, dim, self_dim_size, self_dim_stride, fill_val); } return; } char* __restrict__ self_ptr = reinterpret_cast<char*>(iter.data_ptr(0)); char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1)); auto offset_calc = make_offset_calculator<2>(iter); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); auto* __restrict__ self_data = reinterpret_cast<scalar_t*>(self_ptr + offsets[0]); auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]); CUDA_KERNEL_ASSERT(idx >= -self_dim_size && idx < self_dim_size && "index out of bounds"); if (idx < 0) { idx += self_dim_size; } self_data[idx * self_dim_stride] = fill_val; }; launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop); } template <typename scalar_t> void index_copy_kernel_impl( TensorIterator& iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride) { if (iter.numel() == 0) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { index_copy_kernel_impl<scalar_t>(sub_iter, dim, self_dim_size, self_dim_stride); } return; } char* __restrict__ self_ptr = reinterpret_cast<char*>(iter.data_ptr(0)); char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1)); char* __restrict__ source_ptr = reinterpret_cast<char*>(iter.data_ptr(2)); auto offset_calc = make_offset_calculator<3>(iter); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); auto* __restrict__ self_data = reinterpret_cast<scalar_t*>(self_ptr + offsets[0]); auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]); auto* __restrict__ source_data = reinterpret_cast<scalar_t*>(source_ptr + offsets[2]); CUDA_KERNEL_ASSERT(idx >= 0 && idx < self_dim_size && "index_copy_(): index out of bounds"); self_data[idx * self_dim_stride] = *source_data; }; launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop); } template <typename scalar_t> void index_kernel_impl(TensorIteratorBase& iter, IntArrayRef index_size, IntArrayRef index_stride) { gpu_index_kernel(iter, index_size, index_stride, []C10_DEVICE(char* out_data, char* in_data, int64_t offset) { *(scalar_t*)out_data = *(scalar_t*)(in_data + offset); }); } template <typename scalar_t> void index_put_kernel_impl(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) { gpu_index_kernel(iter, index_size, index_stride, []C10_DEVICE(char* out_data, char* in_data, int64_t offset) { *(scalar_t*)(out_data + offset) = *(scalar_t*)in_data; }); } static void index_kernel(TensorIteratorBase& iter, IntArrayRef index_size, IntArrayRef index_stride) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(kComplexHalf, kHalf, kBool, kBFloat16, iter.dtype(), "index_cuda", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; index_kernel_impl<dtype>(iter, index_size, index_stride); }); } static void index_fill_kernel( TensorIterator& iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride, const Scalar& source) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, kComplexHalf, iter.dtype(), "index_fill_cuda", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; auto fill_val = 
source.to<scalar_t>(); auto fill_val_opaque = *reinterpret_cast<dtype*>(&fill_val); index_fill_kernel_impl<dtype>(iter, dim, self_dim_size, self_dim_stride, fill_val_opaque); }); } static void index_copy_kernel( TensorIterator& iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride) { // See note [Writing Nondeterministic Operations] // Nondeterministic when index contains duplicate entries // this kernel will not be called when torch.use_deterministic_algorithms(True) AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, kComplexHalf, iter.dtype(), "index_copy_cuda", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; index_copy_kernel_impl<dtype>(iter, dim, self_dim_size, self_dim_stride); }); } static void index_put_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, bool accumulate) { TORCH_CHECK(!accumulate, "index_put does not support accumulate=true"); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(kComplexHalf, kHalf, kBool, kBFloat16, iter.dtype(), "index_put", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; index_put_kernel_impl<dtype>(iter, index_size, index_stride); }); } void index_put_kernel_quantized_cuda(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, bool accumulate, double scale, int zero_point) { TORCH_CHECK(!accumulate, "index_put does not support accumulate=true"); AT_DISPATCH_QINT_AND_SUB_BYTE_TYPES(iter.dtype(), "index_put", [&] { constexpr int64_t qmin = std::numeric_limits<typename scalar_t::underlying>::min(); constexpr int64_t qmax = std::numeric_limits<typename scalar_t::underlying>::max(); float inv_scale = 1.0f / static_cast<float>(scale); gpu_index_kernel(iter, index_size, index_stride, [inv_scale, zero_point, qmin, qmax]C10_DEVICE(char* out_data, char* in_data, int64_t offset) { int64_t qvalue = static_cast<int64_t>(zero_point + nearbyintf(*(float*)in_data * inv_scale)); qvalue = min(max(qvalue, qmin), qmax); *(scalar_t*)(out_data + offset) = static_cast<scalar_t>(qvalue); }); }); } template <typename scalar_t, typename index_t, typename func_t> void cuda_take_put_kernel( TensorIterator& iter, const TensorBase& indexed, const func_t& f) { if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { cuda_take_put_kernel<scalar_t, index_t>(sub_iter, indexed, f); } return; } const auto numel = indexed.numel(); const bool is_contiguous = indexed.is_contiguous(); char* __restrict__ iterated_ptr = reinterpret_cast<char*>(iter.data_ptr(0)); char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1)); const auto offset_calc = make_offset_calculator<2>(iter); using uindex_t = std::make_unsigned_t<index_t>; // OffsetCalculator needs the sizes and strides reveresed const auto indexed_sizes = std::vector<int64_t>(indexed.sizes().rbegin(), indexed.sizes().rend()); const auto indexed_strides = std::vector<int64_t>(indexed.strides().rbegin(), indexed.strides().rend()); const auto* indexed_strides_data = indexed_strides.data(); const auto offset_indexed = OffsetCalculator<1, uindex_t>(indexed.dim(), indexed_sizes.data(), &indexed_strides_data); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); auto& iterated = *reinterpret_cast<scalar_t*>(iterated_ptr + offsets[0]); const auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]); CUDA_KERNEL_ASSERT(idx < numel && idx >= -numel && "cuda_take_put_kernel() index out of bounds"); index_t offset = static_cast<index_t>(idx); if (offset < 0) { offset += numel; 
} if (!is_contiguous) { offset = offset_indexed.get(offset)[0]; } f(iterated, offset); }; launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop); } void put_kernel(TensorIterator& iter, const TensorBase& output, const bool accumulate) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "put_cuda", [&] { // Cannot use `OpaqueType`, as we need the actual type for `fastSpecializedgpuAtomicAdd` AT_DISPATCH_INDEX_TYPES(cuda::detail::canUse32BitIndexMath(output) ? ScalarType::Int : ScalarType::Long, "put_cuda_index", [&] { auto* __restrict__ indexed_ptr = output.template data_ptr<scalar_t>(); if (accumulate) { index_t numel = output.numel(); cuda_take_put_kernel<scalar_t, index_t>(iter, output, [numel, indexed_ptr] __device__(scalar_t& iterated, const index_t offset) { fastSpecializedAtomicAdd(indexed_ptr, offset, numel, iterated); }); } else { cuda_take_put_kernel<scalar_t, index_t>(iter, output, [indexed_ptr] __device__(scalar_t& iterated, const index_t offset) { indexed_ptr[offset] = iterated; }); } }); }); } void take_kernel( TensorIterator& iter, const TensorBase& input) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "take_cuda", [&] { // Cannot use `OpaqueType`, as Tensor::data_ptr<OpaqueType<N>> is not implemented AT_DISPATCH_INDEX_TYPES(cuda::detail::canUse32BitIndexMath(input) ? ScalarType::Int : ScalarType::Long, "take_cuda_index", [&] { const auto* __restrict__ indexed_ptr = input.template data_ptr<scalar_t>(); cuda_take_put_kernel<scalar_t, index_t>(iter, input, [indexed_ptr] __device__(scalar_t& iterated, const index_t offset) { iterated = indexed_ptr[offset]; }); }); }); } namespace { __global__ void masked_scatter_size_check(int64_t *mask_exclusive_sum, bool *mask, int64_t srcSize) { // Convert exclusive sum to inclusive sum auto totalElements = *mask_exclusive_sum + *mask; CUDA_KERNEL_ASSERT(totalElements <= srcSize); } } // anonymous namespace void launch_masked_scatter_kernel( const TensorBase &self, const TensorBase &mask, const TensorBase &maskPrefixSum, const TensorBase &source) { auto srcSize = source.numel(); auto mask_cont = mask.contiguous(); auto mask_numel = mask.numel(); // Use a prefix sum to determine the output locations of the masked elements auto maskPrefixSum_data = maskPrefixSum.data_ptr<int64_t>(); auto mask_data = mask_cont.data_ptr<bool>(); at::cuda::cub::mask_exclusive_sum( mask_data, maskPrefixSum_data, mask_numel); // Asynchronously check that the number of `1` elements present in the mask // must be <= the number of elements available in `src`. 
hipLaunchKernelGGL(( masked_scatter_size_check), dim3(1), dim3(1), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), &maskPrefixSum_data[mask_numel - 1], &mask_data[mask_numel - 1], srcSize); C10_HIP_KERNEL_LAUNCH_CHECK(); // We are getting elements from `src` based on an offset from // `maskPrefixSum`, so that should be made contiguous too auto source_contig = source.contiguous(); auto iter = TensorIteratorConfig() .set_check_mem_overlap(false) .check_all_same_dtype(false) .resize_outputs(false) .add_output(self) .add_input(self) .add_input(mask_cont) .add_input(maskPrefixSum) .build(); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( ScalarType::Bool, ScalarType::BFloat16, ScalarType::Half, self.scalar_type(), "masked_scatter_", [&]() { auto source_ptr = source_contig.data_ptr<scalar_t>(); gpu_kernel( iter, [=] GPU_LAMBDA(scalar_t a, bool mask, int64_t maskPrefixSum) -> scalar_t { if (mask) { return source_ptr[maskPrefixSum]; } return a; }); hipGetLastError(); }); } template <typename scalar_t> void flip_kernel_impl(TensorIterator& iter) { if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { flip_kernel_impl<scalar_t>(sub_iter); } return; } char* const __restrict__ out_ptr = reinterpret_cast<char*>(iter.data_ptr(0)); const char* const __restrict__ in_ptr = reinterpret_cast<const char*>(iter.data_ptr(1)); const auto offset_calc = make_offset_calculator<2, /*signed_strides=*/true>(iter); auto loop = [=]C10_DEVICE(const int i) { const auto offsets = offset_calc.get(i); // offsets can be negative here, but it's fine scalar_t* const __restrict__ out_data = reinterpret_cast<scalar_t*>(out_ptr + offsets[0]); const scalar_t* const __restrict__ in_data = reinterpret_cast<const scalar_t*>(in_ptr + offsets[1]); *out_data = *in_data; }; launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop); } void flip_kernel(TensorIterator& iter, const bool quantized) { if (quantized) { AT_DISPATCH_QINT_AND_SUB_BYTE_TYPES(iter.dtype(), "flip_quantized_cuda", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; flip_kernel_impl<dtype>(iter); }); } else { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "flip_cuda", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; flip_kernel_impl<dtype>(iter); }); } } REGISTER_DISPATCH(index_stub, &index_kernel); REGISTER_DISPATCH(index_fill_stub, &index_fill_kernel); REGISTER_DISPATCH(index_copy_stub, &index_copy_kernel); REGISTER_DISPATCH(index_put_stub, &index_put_kernel); REGISTER_DISPATCH(put_stub, &put_kernel); REGISTER_DISPATCH(take_stub, &take_kernel); REGISTER_DISPATCH(flip_stub, &flip_kernel); REGISTER_CUDA_DISPATCH(index_put_kernel_quantized_stub, &index_put_kernel_quantized_cuda); } // namespace at::native
90cef8ee10b5757192b8d4abb93799477b5fe126.cu
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/cuda/IndexKernel.h> #include <ATen/native/IndexKernel.h> #include <type_traits> #include <ATen/core/TensorBase.h> #include <ATen/Dispatch.h> #include <ATen/core/Array.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/cub.h> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/OffsetCalculator.cuh> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/KernelUtils.cuh> #include <ATen/native/quantized/IndexKernel.h> #include <c10/core/Scalar.h> namespace at::native { static constexpr int launch_bound2 = 4; static constexpr int launch_size_nd = 128; template<int nt, int vt, typename func_t> C10_LAUNCH_BOUNDS_2(nt, launch_bound2) __global__ void index_elementwise_kernel(int N, func_t f) { int tid = threadIdx.x; int nv = nt * vt; int idx = nv * blockIdx.x + tid; #pragma unroll for (int i = 0; i < vt; i++) { if (idx < N) { f(idx); idx += nt; } } } template<int nt, int vt, typename func_t> static void launch_kernel(int64_t N, const func_t& f) { TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits<int32_t>::max()); if (N == 0) { return; } dim3 block(nt); dim3 grid((N + block.x * vt - 1) / (block.x * vt)); auto stream = at::cuda::getCurrentCUDAStream(); index_elementwise_kernel<nt, vt, func_t><<<grid, block, 0, stream>>>(N, f); C10_CUDA_KERNEL_LAUNCH_CHECK(); } template <typename func_t> void gpu_index_kernel(TensorIteratorBase& iter, IntArrayRef index_size, IntArrayRef index_stride, const func_t& f) { int num_indices = index_size.size(); AT_ASSERT(static_cast<size_t>(num_indices) == index_stride.size()); AT_ASSERT(num_indices == iter.ntensors() - 2); if (iter.numel() == 0) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { gpu_index_kernel(sub_iter, index_size, index_stride, f); } return; } auto sizes = at::detail::Array<int64_t, MAX_DIMS>(0); auto strides = at::detail::Array<int64_t, MAX_DIMS>(0); auto index_ptrs = at::detail::Array<char*, MAX_DIMS>(nullptr); for (int i = 0; i < num_indices; i++) { sizes[i] = index_size[i]; strides[i] = index_stride[i]; index_ptrs[i] = (char*)iter.data_ptr(i + 2); } char* out_ptr = (char*)iter.data_ptr(0); char* in_ptr = (char*)iter.data_ptr(1); auto offset_calc = make_offset_calculator<3>(iter); launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), [=]__device__(int idx) { auto offsets = offset_calc.get(idx); char* out_data = out_ptr + offsets[0]; char* in_data = in_ptr + offsets[1]; int64_t offset = 0; #pragma unroll for (int i = 0; i < num_indices; i++) { int64_t index = *(int64_t*)(index_ptrs[i] + offsets[2]); CUDA_KERNEL_ASSERT(index >= -sizes[i] && index < sizes[i] && "index out of bounds"); if (index < 0) { index += sizes[i]; } offset += index * strides[i]; } f(out_data, in_data, offset); }); } // The kernels are templated on an opaque, self-aligned type of the correct // size to avoid redundant kernels for different types of the same size. 
template <int N> struct alignas(N) OpaqueType { char data[N]; }; template <typename scalar_t> void index_fill_kernel_impl( TensorIterator& iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride, scalar_t fill_val) { if (0 == iter.numel()) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { index_fill_kernel_impl(sub_iter, dim, self_dim_size, self_dim_stride, fill_val); } return; } char* __restrict__ self_ptr = reinterpret_cast<char*>(iter.data_ptr(0)); char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1)); auto offset_calc = make_offset_calculator<2>(iter); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); auto* __restrict__ self_data = reinterpret_cast<scalar_t*>(self_ptr + offsets[0]); auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]); CUDA_KERNEL_ASSERT(idx >= -self_dim_size && idx < self_dim_size && "index out of bounds"); if (idx < 0) { idx += self_dim_size; } self_data[idx * self_dim_stride] = fill_val; }; launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop); } template <typename scalar_t> void index_copy_kernel_impl( TensorIterator& iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride) { if (iter.numel() == 0) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { index_copy_kernel_impl<scalar_t>(sub_iter, dim, self_dim_size, self_dim_stride); } return; } char* __restrict__ self_ptr = reinterpret_cast<char*>(iter.data_ptr(0)); char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1)); char* __restrict__ source_ptr = reinterpret_cast<char*>(iter.data_ptr(2)); auto offset_calc = make_offset_calculator<3>(iter); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); auto* __restrict__ self_data = reinterpret_cast<scalar_t*>(self_ptr + offsets[0]); auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]); auto* __restrict__ source_data = reinterpret_cast<scalar_t*>(source_ptr + offsets[2]); CUDA_KERNEL_ASSERT(idx >= 0 && idx < self_dim_size && "index_copy_(): index out of bounds"); self_data[idx * self_dim_stride] = *source_data; }; launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop); } template <typename scalar_t> void index_kernel_impl(TensorIteratorBase& iter, IntArrayRef index_size, IntArrayRef index_stride) { gpu_index_kernel(iter, index_size, index_stride, []C10_DEVICE(char* out_data, char* in_data, int64_t offset) { *(scalar_t*)out_data = *(scalar_t*)(in_data + offset); }); } template <typename scalar_t> void index_put_kernel_impl(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) { gpu_index_kernel(iter, index_size, index_stride, []C10_DEVICE(char* out_data, char* in_data, int64_t offset) { *(scalar_t*)(out_data + offset) = *(scalar_t*)in_data; }); } static void index_kernel(TensorIteratorBase& iter, IntArrayRef index_size, IntArrayRef index_stride) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(kComplexHalf, kHalf, kBool, kBFloat16, iter.dtype(), "index_cuda", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; index_kernel_impl<dtype>(iter, index_size, index_stride); }); } static void index_fill_kernel( TensorIterator& iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride, const Scalar& source) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, kComplexHalf, iter.dtype(), "index_fill_cuda", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; auto fill_val = 
source.to<scalar_t>(); auto fill_val_opaque = *reinterpret_cast<dtype*>(&fill_val); index_fill_kernel_impl<dtype>(iter, dim, self_dim_size, self_dim_stride, fill_val_opaque); }); } static void index_copy_kernel( TensorIterator& iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride) { // See note [Writing Nondeterministic Operations] // Nondeterministic when index contains duplicate entries // this kernel will not be called when torch.use_deterministic_algorithms(True) AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, kComplexHalf, iter.dtype(), "index_copy_cuda", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; index_copy_kernel_impl<dtype>(iter, dim, self_dim_size, self_dim_stride); }); } static void index_put_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, bool accumulate) { TORCH_CHECK(!accumulate, "index_put does not support accumulate=true"); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(kComplexHalf, kHalf, kBool, kBFloat16, iter.dtype(), "index_put", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; index_put_kernel_impl<dtype>(iter, index_size, index_stride); }); } void index_put_kernel_quantized_cuda(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, bool accumulate, double scale, int zero_point) { TORCH_CHECK(!accumulate, "index_put does not support accumulate=true"); AT_DISPATCH_QINT_AND_SUB_BYTE_TYPES(iter.dtype(), "index_put", [&] { constexpr int64_t qmin = std::numeric_limits<typename scalar_t::underlying>::min(); constexpr int64_t qmax = std::numeric_limits<typename scalar_t::underlying>::max(); float inv_scale = 1.0f / static_cast<float>(scale); gpu_index_kernel(iter, index_size, index_stride, [inv_scale, zero_point, qmin, qmax]C10_DEVICE(char* out_data, char* in_data, int64_t offset) { int64_t qvalue = static_cast<int64_t>(zero_point + nearbyintf(*(float*)in_data * inv_scale)); qvalue = min(max(qvalue, qmin), qmax); *(scalar_t*)(out_data + offset) = static_cast<scalar_t>(qvalue); }); }); } template <typename scalar_t, typename index_t, typename func_t> void cuda_take_put_kernel( TensorIterator& iter, const TensorBase& indexed, const func_t& f) { if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { cuda_take_put_kernel<scalar_t, index_t>(sub_iter, indexed, f); } return; } const auto numel = indexed.numel(); const bool is_contiguous = indexed.is_contiguous(); char* __restrict__ iterated_ptr = reinterpret_cast<char*>(iter.data_ptr(0)); char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1)); const auto offset_calc = make_offset_calculator<2>(iter); using uindex_t = std::make_unsigned_t<index_t>; // OffsetCalculator needs the sizes and strides reveresed const auto indexed_sizes = std::vector<int64_t>(indexed.sizes().rbegin(), indexed.sizes().rend()); const auto indexed_strides = std::vector<int64_t>(indexed.strides().rbegin(), indexed.strides().rend()); const auto* indexed_strides_data = indexed_strides.data(); const auto offset_indexed = OffsetCalculator<1, uindex_t>(indexed.dim(), indexed_sizes.data(), &indexed_strides_data); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); auto& iterated = *reinterpret_cast<scalar_t*>(iterated_ptr + offsets[0]); const auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]); CUDA_KERNEL_ASSERT(idx < numel && idx >= -numel && "cuda_take_put_kernel() index out of bounds"); index_t offset = static_cast<index_t>(idx); if (offset < 0) { offset += numel; 
} if (!is_contiguous) { offset = offset_indexed.get(offset)[0]; } f(iterated, offset); }; launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop); } void put_kernel(TensorIterator& iter, const TensorBase& output, const bool accumulate) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "put_cuda", [&] { // Cannot use `OpaqueType`, as we need the actual type for `fastSpecializedgpuAtomicAdd` AT_DISPATCH_INDEX_TYPES(cuda::detail::canUse32BitIndexMath(output) ? ScalarType::Int : ScalarType::Long, "put_cuda_index", [&] { auto* __restrict__ indexed_ptr = output.template data_ptr<scalar_t>(); if (accumulate) { index_t numel = output.numel(); cuda_take_put_kernel<scalar_t, index_t>(iter, output, [numel, indexed_ptr] __device__(scalar_t& iterated, const index_t offset) { fastSpecializedAtomicAdd(indexed_ptr, offset, numel, iterated); }); } else { cuda_take_put_kernel<scalar_t, index_t>(iter, output, [indexed_ptr] __device__(scalar_t& iterated, const index_t offset) { indexed_ptr[offset] = iterated; }); } }); }); } void take_kernel( TensorIterator& iter, const TensorBase& input) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "take_cuda", [&] { // Cannot use `OpaqueType`, as Tensor::data_ptr<OpaqueType<N>> is not implemented AT_DISPATCH_INDEX_TYPES(cuda::detail::canUse32BitIndexMath(input) ? ScalarType::Int : ScalarType::Long, "take_cuda_index", [&] { const auto* __restrict__ indexed_ptr = input.template data_ptr<scalar_t>(); cuda_take_put_kernel<scalar_t, index_t>(iter, input, [indexed_ptr] __device__(scalar_t& iterated, const index_t offset) { iterated = indexed_ptr[offset]; }); }); }); } namespace { __global__ void masked_scatter_size_check(int64_t *mask_exclusive_sum, bool *mask, int64_t srcSize) { // Convert exclusive sum to inclusive sum auto totalElements = *mask_exclusive_sum + *mask; CUDA_KERNEL_ASSERT(totalElements <= srcSize); } } // anonymous namespace void launch_masked_scatter_kernel( const TensorBase &self, const TensorBase &mask, const TensorBase &maskPrefixSum, const TensorBase &source) { auto srcSize = source.numel(); auto mask_cont = mask.contiguous(); auto mask_numel = mask.numel(); // Use a prefix sum to determine the output locations of the masked elements auto maskPrefixSum_data = maskPrefixSum.data_ptr<int64_t>(); auto mask_data = mask_cont.data_ptr<bool>(); at::cuda::cub::mask_exclusive_sum( mask_data, maskPrefixSum_data, mask_numel); // Asynchronously check that the number of `1` elements present in the mask // must be <= the number of elements available in `src`. 
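// [Illustrative note added by the editor; not part of the upstream file.]
// The prefix sum computed above is exclusive, so its last entry does not yet
// count the last mask element; the size-check kernel below adds it back.
// E.g. mask = [1, 0, 1, 1] gives maskPrefixSum = [0, 1, 1, 2], and
// totalElements = 2 + 1 = 3, which must not exceed source.numel().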
masked_scatter_size_check<<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>( &maskPrefixSum_data[mask_numel - 1], &mask_data[mask_numel - 1], srcSize); C10_CUDA_KERNEL_LAUNCH_CHECK(); // We are getting elements from `src` based on an offset from // `maskPrefixSum`, so that should be made contiguous too auto source_contig = source.contiguous(); auto iter = TensorIteratorConfig() .set_check_mem_overlap(false) .check_all_same_dtype(false) .resize_outputs(false) .add_output(self) .add_input(self) .add_input(mask_cont) .add_input(maskPrefixSum) .build(); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( ScalarType::Bool, ScalarType::BFloat16, ScalarType::Half, self.scalar_type(), "masked_scatter_", [&]() { auto source_ptr = source_contig.data_ptr<scalar_t>(); gpu_kernel( iter, [=] GPU_LAMBDA(scalar_t a, bool mask, int64_t maskPrefixSum) -> scalar_t { if (mask) { return source_ptr[maskPrefixSum]; } return a; }); cudaGetLastError(); }); } template <typename scalar_t> void flip_kernel_impl(TensorIterator& iter) { if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { flip_kernel_impl<scalar_t>(sub_iter); } return; } char* const __restrict__ out_ptr = reinterpret_cast<char*>(iter.data_ptr(0)); const char* const __restrict__ in_ptr = reinterpret_cast<const char*>(iter.data_ptr(1)); const auto offset_calc = make_offset_calculator<2, /*signed_strides=*/true>(iter); auto loop = [=]C10_DEVICE(const int i) { const auto offsets = offset_calc.get(i); // offsets can be negative here, but it's fine scalar_t* const __restrict__ out_data = reinterpret_cast<scalar_t*>(out_ptr + offsets[0]); const scalar_t* const __restrict__ in_data = reinterpret_cast<const scalar_t*>(in_ptr + offsets[1]); *out_data = *in_data; }; launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop); } void flip_kernel(TensorIterator& iter, const bool quantized) { if (quantized) { AT_DISPATCH_QINT_AND_SUB_BYTE_TYPES(iter.dtype(), "flip_quantized_cuda", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; flip_kernel_impl<dtype>(iter); }); } else { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "flip_cuda", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; flip_kernel_impl<dtype>(iter); }); } } REGISTER_DISPATCH(index_stub, &index_kernel); REGISTER_DISPATCH(index_fill_stub, &index_fill_kernel); REGISTER_DISPATCH(index_copy_stub, &index_copy_kernel); REGISTER_DISPATCH(index_put_stub, &index_put_kernel); REGISTER_DISPATCH(put_stub, &put_kernel); REGISTER_DISPATCH(take_stub, &take_kernel); REGISTER_DISPATCH(flip_stub, &flip_kernel); REGISTER_CUDA_DISPATCH(index_put_kernel_quantized_stub, &index_put_kernel_quantized_cuda); } // namespace at::native
25df5c7b60920bafdfc3bc00370f2e4392fd9938.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! ******************* BEGIN Caffe Copyright Notice and Disclaimer **************** * * COPYRIGHT * * All contributions by the University of California: * Copyright (c) 2014-2017 The Regents of the University of California (Regents) * All rights reserved. * * All other contributions: * Copyright (c) 2014-2017, the respective contributors * All rights reserved. * * Caffe uses a shared copyright model: each contributor holds copyright over * their contributions to Caffe. The project versioning records all such * contribution and copyright details. If a contributor wants to further mark * their specific copyright on a particular contribution, they should indicate * their copyright solely in the commit message of the change when it is * committed. * * LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * CONTRIBUTION AGREEMENT * * By contributing to the BVLC/caffe repository through pull-request, comment, * or otherwise, the contributor releases their content to the * license and copyright terms herein. * ***************** END Caffe Copyright Notice and Disclaimer ******************** * * Copyright (c) 2018 Microsoft * Licensed under The MIT License [see LICENSE for details] * \file modulated_deformable_im2col.cuh * \brief Function definitions of converting an image to * column matrix based on kernel, padding, dilation, and offset. * These functions are mainly used in deformable convolution operators. 
* \ref: https://arxiv.org/abs/1703.06211 * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng */ // modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THHAtomics.cuh> #include <stdio.h> #include <math.h> #include <float.h> using namespace at; #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) const int CUDA_NUM_THREADS = 1024; const int kMaxGridNum = 65535; inline int GET_BLOCKS(const int N) { return ::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS); } template <typename scalar_t> __device__ scalar_t deformable_im2col_bilinear(const scalar_t *bottom_data, const int data_width, const int height, const int width, scalar_t h, scalar_t w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; scalar_t lh = h - h_low; scalar_t lw = w - w_low; scalar_t hh = 1 - lh, hw = 1 - lw; scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; scalar_t v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; scalar_t v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; scalar_t v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename scalar_t> __device__ scalar_t get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (h == argmax_h_low && w == argmax_w_low) weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); if (h == argmax_h_low && w == argmax_w_high) weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); if (h == argmax_h_high && w == argmax_w_low) weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); if (h == argmax_h_high && w == argmax_w_high) weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); return weight; } template <typename scalar_t> __device__ scalar_t get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w, const int height, const int width, const scalar_t *im_data, const int data_width, const int bp_dir) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (bp_dir == 0) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * 
data_width + argmax_w_high]; } else if (bp_dir == 1) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } template <typename scalar_t> __global__ void deformable_im2col_gpu_kernel(const int n, const scalar_t *data_im, const scalar_t *data_offset, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, scalar_t *data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; //const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in; const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = h_in + i * dilation_h + offset_h; const scalar_t w_im = w_in + j * dilation_w + offset_w; if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { //const scalar_t map_h = i * dilation_h + offset_h; //const scalar_t map_w = j * dilation_w + offset_w; //const int cur_height = height - h_in; //const int cur_width = width - w_in; //val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); val = deformable_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val; data_col_ptr += batch_size * height_col * width_col; } } } } template <typename scalar_t> __global__ void pyramid_deformable_im2col_gpu_kernel(const int n, const scalar_t *data_im, const scalar_t *data_offset, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const 
float scale_h, const float scale_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, scalar_t *data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; //const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in; const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = (h_in + i * dilation_h)*scale_h + offset_h; const scalar_t w_im = (w_in + j * dilation_w)*scale_w + offset_w; if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { //const scalar_t map_h = i * dilation_h + offset_h; //const scalar_t map_w = j * dilation_w + offset_w; //const int cur_height = height - h_in; //const int cur_width = width - w_in; //val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); val = deformable_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val; data_col_ptr += batch_size * height_col * width_col; } } } } void deformable_im2col( const at::Tensor data_im, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor data_col) { // num_axes should be smaller than block size // todo: check parallel_imgs is correctly passed in int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_im.scalar_type(), "deformable_im2col_gpu", ([&] { const scalar_t *data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t *data_col_ = data_col.data_ptr<scalar_t>(); hipLaunchKernelGGL(( deformable_im2col_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), num_kernels, data_im_, data_offset_, height, width, ksize_h, ksize_w, pad_h, pad_w, 
stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, channels, deformable_group, height_col, width_col, data_col_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in deformable_im2col: %s\n", hipGetErrorString(err)); } } template <typename scalar_t> __global__ void deformable_col2im_gpu_kernel( const int n, const scalar_t *data_col, const scalar_t *data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int deformable_group, const int height_col, const int width_col, scalar_t *grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col / batch_size) % kernel_w; const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int b = (index / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; const scalar_t cur_top_grad = data_col[index]; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; scalar_t weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } template <typename scalar_t> __global__ void pyramid_deformable_col2im_gpu_kernel( const int n, const scalar_t *data_col, const scalar_t *data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const float scale_h, const float scale_w, const int channel_per_deformable_group, const int batch_size, const int deformable_group, const int height_col, const int width_col, scalar_t *grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col / batch_size) % kernel_w; const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / 
channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int b = (index / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t cur_inv_h_data = (h_in + i * dilation_h)*scale_h + offset_h; const scalar_t cur_inv_w_data = (w_in + j * dilation_w)*scale_w + offset_w; const scalar_t cur_top_grad = data_col[index]; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; scalar_t weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } void deformable_col2im( const at::Tensor data_col, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor grad_im) { // todo: make sure parallel_imgs is passed in correctly int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "deformable_col2im_gpu", ([&] { const scalar_t *data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t *grad_im_ = grad_im.data_ptr<scalar_t>(); hipLaunchKernelGGL(( deformable_col2im_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), num_kernels, data_col_, data_offset_, channels, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, deformable_group, height_col, width_col, grad_im_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in deformable_col2im: %s\n", hipGetErrorString(err)); } } template <typename scalar_t> __global__ void deformable_col2im_coord_gpu_kernel(const int n, const scalar_t *data_col, const scalar_t *data_im, const scalar_t *data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int 
deformable_group, const int height_col, const int width_col, scalar_t *grad_offset) { CUDA_KERNEL_LOOP(index, n) { scalar_t val = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = (index / width_col / height_col) % offset_channels; int b = (index / width_col / height_col) / offset_channels; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; scalar_t inv_h = h_in + i * dilation_h + offset_h; scalar_t inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } const scalar_t weight = get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos]; cnt += 1; } grad_offset[index] = val; } } template <typename scalar_t> __global__ void pyramid_deformable_col2im_coord_gpu_kernel(const int n, const scalar_t *data_col, const scalar_t *data_im, const scalar_t *data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const float scale_h, const float scale_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, scalar_t *grad_offset) { CUDA_KERNEL_LOOP(index, n) { scalar_t val = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = (index / width_col / height_col) % offset_channels; int b = (index / width_col / height_col) / offset_channels; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * 
width; const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; scalar_t inv_h = (h_in + i * dilation_h)*scale_h + offset_h; scalar_t inv_w = (w_in + j * dilation_w)*scale_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } const scalar_t weight = get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos]; cnt += 1; } grad_offset[index] = val; } } void deformable_col2im_coord( const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor grad_offset) { int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs; int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "deformable_col2im_coord_gpu", ([&] { const scalar_t *data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t *data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t *grad_offset_ = grad_offset.data_ptr<scalar_t>(); hipLaunchKernelGGL(( deformable_col2im_coord_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), num_kernels, data_col_, data_im_, data_offset_, channels, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group, height_col, width_col, grad_offset_); })); } void pyramid_deformable_im2col( const at::Tensor data_im, const at::Tensor data_offset, const int channels, const int inputHeight, const int inputWidth, const int offsetHeight, const int offsetWidth, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const float scale_h, const float scale_w, const int parallel_imgs, const int deformable_group, at::Tensor data_col) { // num_axes should 
be smaller than block size // todo: check parallel_imgs is correctly passed in int height_col = (offsetHeight + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (offsetWidth + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_im.scalar_type(), "deformable_im2col_gpu", ([&] { const scalar_t *data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t *data_col_ = data_col.data_ptr<scalar_t>(); hipLaunchKernelGGL(( pyramid_deformable_im2col_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), num_kernels, data_im_, data_offset_, inputHeight, inputWidth, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, scale_h, scale_w, channel_per_deformable_group, parallel_imgs, channels, deformable_group, height_col, width_col, data_col_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in pyramid deformable_im2col: %s\n", hipGetErrorString(err)); } } void pyramid_deformable_col2im_coord( const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const int channels, const int inputHeight, const int inputWidth, const int offsetHeight, const int offsetWidth, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const float scale_h, const float scale_w, const int parallel_imgs, const int deformable_group, at::Tensor grad_offset) { int height_col = (offsetHeight + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (offsetWidth + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs; int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "deformable_col2im_coord_gpu", ([&] { const scalar_t *data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t *data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t *grad_offset_ = grad_offset.data_ptr<scalar_t>(); hipLaunchKernelGGL(( pyramid_deformable_col2im_coord_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), num_kernels, data_col_, data_im_, data_offset_, channels, inputHeight, inputWidth, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, scale_h, scale_w, channel_per_deformable_group, parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group, height_col, width_col, grad_offset_); })); } void pyramid_deformable_col2im( const at::Tensor data_col, const at::Tensor data_offset, const int channels, const int inputHeight, const int inputWidth, const int offsetHeight, const int offsetWidth, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const float scale_h, const float scale_w, const int parallel_imgs, const int deformable_group, at::Tensor grad_im) { // todo: make sure parallel_imgs is passed in correctly int height_col = (offsetHeight + 2 * pad_h - (dilation_h 
* (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (offsetWidth + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "deformable_col2im_gpu", ([&] { const scalar_t *data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t *grad_im_ = grad_im.data_ptr<scalar_t>(); hipLaunchKernelGGL(( pyramid_deformable_col2im_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), num_kernels, data_col_, data_offset_, channels, inputHeight, inputWidth, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, scale_h, scale_w, channel_per_deformable_group, parallel_imgs, deformable_group, height_col, width_col, grad_im_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in pyramid deformable_col2im: %s\n", hipGetErrorString(err)); } } template <typename scalar_t> __device__ scalar_t dmcn_im2col_bilinear(const scalar_t *bottom_data, const int data_width, const int height, const int width, scalar_t h, scalar_t w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; scalar_t lh = h - h_low; scalar_t lw = w - w_low; scalar_t hh = 1 - lh, hw = 1 - lw; scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; scalar_t v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; scalar_t v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; scalar_t v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename scalar_t> __device__ scalar_t dmcn_get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (h == argmax_h_low && w == argmax_w_low) weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); if (h == argmax_h_low && w == argmax_w_high) weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); if (h == argmax_h_high && w == argmax_w_low) weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); if (h == argmax_h_high && w == argmax_w_high) weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); return weight; } template <typename scalar_t> __device__ scalar_t dmcn_get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w, const int height, const int width, const scalar_t *im_data, const int data_width, const int bp_dir) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (bp_dir == 0) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 
&& argmax_w_high <= width - 1) weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } template <typename scalar_t> __global__ void modulated_deformable_im2col_gpu_kernel(const int n, const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, scalar_t *data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; //const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in; const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = h_in + i * dilation_h + offset_h; const scalar_t w_im = w_in + j * dilation_w + offset_w; //if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) 
{ //const float map_h = i * dilation_h + offset_h; //const float map_w = j * dilation_w + offset_w; //const int cur_height = height - h_in; //const int cur_width = width - w_in; //val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val * mask; data_col_ptr += batch_size * height_col * width_col; //data_col_ptr += height_col * width_col; } } } } template <typename scalar_t> __global__ void modulated_deformable_col2im_gpu_kernel(const int n, const scalar_t *data_col, const scalar_t *data_offset, const scalar_t *data_mask, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int deformable_group, const int height_col, const int width_col, scalar_t *grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col / batch_size) % kernel_w; const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int b = (index / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; const scalar_t cur_top_grad = data_col[index] * mask; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; scalar_t weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } template <typename scalar_t> __global__ void modulated_deformable_col2im_coord_gpu_kernel(const int n, const scalar_t *data_col, const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, 
const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, scalar_t *grad_offset, scalar_t *grad_mask) { CUDA_KERNEL_LOOP(index, n) { scalar_t val = 0, mval = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = (index / width_col / height_col) % offset_channels; int b = (index / width_col / height_col) / offset_channels; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out); const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; scalar_t inv_h = h_in + i * dilation_h + offset_h; scalar_t inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } else { mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w); } const scalar_t weight = dmcn_get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos] * mask; cnt += 1; } // KERNEL_ASSIGN(grad_offset[index], offset_req, val); grad_offset[index] = val; if (offset_c % 2 == 0) // KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval); grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval; } } void modulated_deformable_im2col_cuda( const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kenerl_w, const int pad_h, const int pad_w, const int 
stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor data_col) { // num_axes should be smaller than block size const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * batch_size * height_col * width_col; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_im.scalar_type(), "modulated_deformable_im2col_gpu", ([&] { const scalar_t *data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>(); const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>(); scalar_t *data_col_ = data_col.data_ptr<scalar_t>(); hipLaunchKernelGGL(( modulated_deformable_im2col_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kenerl_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, channels, deformable_group, height_col, width_col, data_col_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in modulated_deformable_im2col_cuda: %s\n", hipGetErrorString(err)); } } void modulated_deformable_col2im_cuda( const at::Tensor data_col, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor grad_im) { const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "modulated_deformable_col2im_gpu", ([&] { const scalar_t *data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>(); const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>(); scalar_t *grad_im_ = grad_im.data_ptr<scalar_t>(); hipLaunchKernelGGL(( modulated_deformable_col2im_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), num_kernels, data_col_, data_offset_, data_mask_, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, deformable_group, height_col, width_col, grad_im_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in modulated_deformable_col2im_cuda: %s\n", hipGetErrorString(err)); } } void modulated_deformable_col2im_coord_cuda( const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor grad_offset, at::Tensor grad_mask) { const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group; const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), 
"modulated_deformable_col2im_coord_gpu", ([&] { const scalar_t *data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t *data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>(); const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>(); scalar_t *grad_offset_ = grad_offset.data_ptr<scalar_t>(); scalar_t *grad_mask_ = grad_mask.data_ptr<scalar_t>(); hipLaunchKernelGGL(( modulated_deformable_col2im_coord_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), num_kernels, data_col_, data_im_, data_offset_, data_mask_, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col, grad_offset_, grad_mask_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in modulated_deformable_col2im_coord_cuda: %s\n", hipGetErrorString(err)); } }
25df5c7b60920bafdfc3bc00370f2e4392fd9938.cu
/*! ******************* BEGIN Caffe Copyright Notice and Disclaimer **************** * * COPYRIGHT * * All contributions by the University of California: * Copyright (c) 2014-2017 The Regents of the University of California (Regents) * All rights reserved. * * All other contributions: * Copyright (c) 2014-2017, the respective contributors * All rights reserved. * * Caffe uses a shared copyright model: each contributor holds copyright over * their contributions to Caffe. The project versioning records all such * contribution and copyright details. If a contributor wants to further mark * their specific copyright on a particular contribution, they should indicate * their copyright solely in the commit message of the change when it is * committed. * * LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * CONTRIBUTION AGREEMENT * * By contributing to the BVLC/caffe repository through pull-request, comment, * or otherwise, the contributor releases their content to the * license and copyright terms herein. * ***************** END Caffe Copyright Notice and Disclaimer ******************** * * Copyright (c) 2018 Microsoft * Licensed under The MIT License [see LICENSE for details] * \file modulated_deformable_im2col.cuh * \brief Function definitions of converting an image to * column matrix based on kernel, padding, dilation, and offset. * These functions are mainly used in deformable convolution operators. 
* \ref: https://arxiv.org/abs/1703.06211 * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng */ // modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THCAtomics.cuh> #include <stdio.h> #include <math.h> #include <float.h> using namespace at; #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) const int CUDA_NUM_THREADS = 1024; const int kMaxGridNum = 65535; inline int GET_BLOCKS(const int N) { return std::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS); } template <typename scalar_t> __device__ scalar_t deformable_im2col_bilinear(const scalar_t *bottom_data, const int data_width, const int height, const int width, scalar_t h, scalar_t w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; scalar_t lh = h - h_low; scalar_t lw = w - w_low; scalar_t hh = 1 - lh, hw = 1 - lw; scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; scalar_t v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; scalar_t v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; scalar_t v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename scalar_t> __device__ scalar_t get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (h == argmax_h_low && w == argmax_w_low) weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); if (h == argmax_h_low && w == argmax_w_high) weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); if (h == argmax_h_high && w == argmax_w_low) weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); if (h == argmax_h_high && w == argmax_w_high) weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); return weight; } template <typename scalar_t> __device__ scalar_t get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w, const int height, const int width, const scalar_t *im_data, const int data_width, const int bp_dir) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (bp_dir == 0) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * 
data_width + argmax_w_high]; } else if (bp_dir == 1) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } template <typename scalar_t> __global__ void deformable_im2col_gpu_kernel(const int n, const scalar_t *data_im, const scalar_t *data_offset, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, scalar_t *data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; //const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in; const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = h_in + i * dilation_h + offset_h; const scalar_t w_im = w_in + j * dilation_w + offset_w; if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { //const scalar_t map_h = i * dilation_h + offset_h; //const scalar_t map_w = j * dilation_w + offset_w; //const int cur_height = height - h_in; //const int cur_width = width - w_in; //val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); val = deformable_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val; data_col_ptr += batch_size * height_col * width_col; } } } } template <typename scalar_t> __global__ void pyramid_deformable_im2col_gpu_kernel(const int n, const scalar_t *data_im, const scalar_t *data_offset, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const 
float scale_h, const float scale_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, scalar_t *data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; //const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in; const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = (h_in + i * dilation_h)*scale_h + offset_h; const scalar_t w_im = (w_in + j * dilation_w)*scale_w + offset_w; if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { //const scalar_t map_h = i * dilation_h + offset_h; //const scalar_t map_w = j * dilation_w + offset_w; //const int cur_height = height - h_in; //const int cur_width = width - w_in; //val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); val = deformable_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val; data_col_ptr += batch_size * height_col * width_col; } } } } void deformable_im2col( const at::Tensor data_im, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor data_col) { // num_axes should be smaller than block size // todo: check parallel_imgs is correctly passed in int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_im.scalar_type(), "deformable_im2col_gpu", ([&] { const scalar_t *data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t *data_col_ = data_col.data_ptr<scalar_t>(); deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( num_kernels, data_im_, data_offset_, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, 
channel_per_deformable_group, parallel_imgs, channels, deformable_group, height_col, width_col, data_col_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in deformable_im2col: %s\n", cudaGetErrorString(err)); } } template <typename scalar_t> __global__ void deformable_col2im_gpu_kernel( const int n, const scalar_t *data_col, const scalar_t *data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int deformable_group, const int height_col, const int width_col, scalar_t *grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col / batch_size) % kernel_w; const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int b = (index / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; const scalar_t cur_top_grad = data_col[index]; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; scalar_t weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } template <typename scalar_t> __global__ void pyramid_deformable_col2im_gpu_kernel( const int n, const scalar_t *data_col, const scalar_t *data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const float scale_h, const float scale_w, const int channel_per_deformable_group, const int batch_size, const int deformable_group, const int height_col, const int width_col, scalar_t *grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col / batch_size) % kernel_w; const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % 
width_col; int h_out = (index / width_col) % height_col; int b = (index / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t cur_inv_h_data = (h_in + i * dilation_h)*scale_h + offset_h; const scalar_t cur_inv_w_data = (w_in + j * dilation_w)*scale_w + offset_w; const scalar_t cur_top_grad = data_col[index]; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; scalar_t weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } void deformable_col2im( const at::Tensor data_col, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor grad_im) { // todo: make sure parallel_imgs is passed in correctly int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "deformable_col2im_gpu", ([&] { const scalar_t *data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t *grad_im_ = grad_im.data_ptr<scalar_t>(); deformable_col2im_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( num_kernels, data_col_, data_offset_, channels, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, deformable_group, height_col, width_col, grad_im_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in deformable_col2im: %s\n", cudaGetErrorString(err)); } } template <typename scalar_t> __global__ void deformable_col2im_coord_gpu_kernel(const int n, const scalar_t *data_col, const scalar_t *data_im, const scalar_t *data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, scalar_t *grad_offset) { 
CUDA_KERNEL_LOOP(index, n) { scalar_t val = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = (index / width_col / height_col) % offset_channels; int b = (index / width_col / height_col) / offset_channels; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; scalar_t inv_h = h_in + i * dilation_h + offset_h; scalar_t inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } const scalar_t weight = get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos]; cnt += 1; } grad_offset[index] = val; } } template <typename scalar_t> __global__ void pyramid_deformable_col2im_coord_gpu_kernel(const int n, const scalar_t *data_col, const scalar_t *data_im, const scalar_t *data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const float scale_h, const float scale_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, scalar_t *grad_offset) { CUDA_KERNEL_LOOP(index, n) { scalar_t val = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = (index / width_col / height_col) % offset_channels; int b = (index / width_col / height_col) / offset_channels; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + 
deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; scalar_t inv_h = (h_in + i * dilation_h)*scale_h + offset_h; scalar_t inv_w = (w_in + j * dilation_w)*scale_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } const scalar_t weight = get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos]; cnt += 1; } grad_offset[index] = val; } } void deformable_col2im_coord( const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor grad_offset) { int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs; int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "deformable_col2im_coord_gpu", ([&] { const scalar_t *data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t *data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t *grad_offset_ = grad_offset.data_ptr<scalar_t>(); deformable_col2im_coord_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( num_kernels, data_col_, data_im_, data_offset_, channels, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group, height_col, width_col, grad_offset_); })); } void pyramid_deformable_im2col( const at::Tensor data_im, const at::Tensor data_offset, const int channels, const int inputHeight, const int inputWidth, const int offsetHeight, const int offsetWidth, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const float scale_h, const float scale_w, const int parallel_imgs, const int deformable_group, at::Tensor data_col) { // num_axes should be smaller than block size // todo: check parallel_imgs is correctly passed in int height_col = (offsetHeight + 2 * pad_h - 
(dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (offsetWidth + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_im.scalar_type(), "deformable_im2col_gpu", ([&] { const scalar_t *data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t *data_col_ = data_col.data_ptr<scalar_t>(); pyramid_deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( num_kernels, data_im_, data_offset_, inputHeight, inputWidth, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, scale_h, scale_w, channel_per_deformable_group, parallel_imgs, channels, deformable_group, height_col, width_col, data_col_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in pyramid deformable_im2col: %s\n", cudaGetErrorString(err)); } } void pyramid_deformable_col2im_coord( const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const int channels, const int inputHeight, const int inputWidth, const int offsetHeight, const int offsetWidth, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const float scale_h, const float scale_w, const int parallel_imgs, const int deformable_group, at::Tensor grad_offset) { int height_col = (offsetHeight + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (offsetWidth + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs; int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "deformable_col2im_coord_gpu", ([&] { const scalar_t *data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t *data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t *grad_offset_ = grad_offset.data_ptr<scalar_t>(); pyramid_deformable_col2im_coord_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( num_kernels, data_col_, data_im_, data_offset_, channels, inputHeight, inputWidth, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, scale_h, scale_w, channel_per_deformable_group, parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group, height_col, width_col, grad_offset_); })); } void pyramid_deformable_col2im( const at::Tensor data_col, const at::Tensor data_offset, const int channels, const int inputHeight, const int inputWidth, const int offsetHeight, const int offsetWidth, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const float scale_h, const float scale_w, const int parallel_imgs, const int deformable_group, at::Tensor grad_im) { // todo: make sure parallel_imgs is passed in correctly int height_col = (offsetHeight + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (offsetWidth + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * 
parallel_imgs; int channel_per_deformable_group = channels / deformable_group; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "deformable_col2im_gpu", ([&] { const scalar_t *data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t *grad_im_ = grad_im.data_ptr<scalar_t>(); pyramid_deformable_col2im_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( num_kernels, data_col_, data_offset_, channels, inputHeight, inputWidth, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, scale_h, scale_w, channel_per_deformable_group, parallel_imgs, deformable_group, height_col, width_col, grad_im_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in pyramid deformable_col2im: %s\n", cudaGetErrorString(err)); } } template <typename scalar_t> __device__ scalar_t dmcn_im2col_bilinear(const scalar_t *bottom_data, const int data_width, const int height, const int width, scalar_t h, scalar_t w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; scalar_t lh = h - h_low; scalar_t lw = w - w_low; scalar_t hh = 1 - lh, hw = 1 - lw; scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; scalar_t v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; scalar_t v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; scalar_t v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename scalar_t> __device__ scalar_t dmcn_get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (h == argmax_h_low && w == argmax_w_low) weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); if (h == argmax_h_low && w == argmax_w_high) weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); if (h == argmax_h_high && w == argmax_w_low) weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); if (h == argmax_h_high && w == argmax_w_high) weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); return weight; } template <typename scalar_t> __device__ scalar_t dmcn_get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w, const int height, const int width, const scalar_t *im_data, const int data_width, const int bp_dir) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (bp_dir == 0) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * 
data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } template <typename scalar_t> __global__ void modulated_deformable_im2col_gpu_kernel(const int n, const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, scalar_t *data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; //const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in; const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = h_in + i * dilation_h + offset_h; const scalar_t w_im = w_in + j * dilation_w + offset_w; //if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { //const float map_h = i * dilation_h + offset_h; //const float map_w = j * dilation_w + offset_w; //const int cur_height = height - h_in; //const int cur_width = width - w_in; //val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, 
cur_width, map_h, map_w); val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val * mask; data_col_ptr += batch_size * height_col * width_col; //data_col_ptr += height_col * width_col; } } } } template <typename scalar_t> __global__ void modulated_deformable_col2im_gpu_kernel(const int n, const scalar_t *data_col, const scalar_t *data_offset, const scalar_t *data_mask, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int deformable_group, const int height_col, const int width_col, scalar_t *grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col / batch_size) % kernel_w; const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int b = (index / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; const scalar_t cur_top_grad = data_col[index] * mask; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; scalar_t weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } template <typename scalar_t> __global__ void modulated_deformable_col2im_coord_gpu_kernel(const int n, const scalar_t *data_col, const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, scalar_t *grad_offset, scalar_t *grad_mask) { 
CUDA_KERNEL_LOOP(index, n) { scalar_t val = 0, mval = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = (index / width_col / height_col) % offset_channels; int b = (index / width_col / height_col) / offset_channels; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out); const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; scalar_t inv_h = h_in + i * dilation_h + offset_h; scalar_t inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } else { mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w); } const scalar_t weight = dmcn_get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos] * mask; cnt += 1; } // KERNEL_ASSIGN(grad_offset[index], offset_req, val); grad_offset[index] = val; if (offset_c % 2 == 0) // KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval); grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval; } } void modulated_deformable_im2col_cuda( const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kenerl_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor data_col) { // num_axes should be smaller than block size const int channel_per_deformable_group = channels / 
deformable_group; const int num_kernels = channels * batch_size * height_col * width_col; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_im.scalar_type(), "modulated_deformable_im2col_gpu", ([&] { const scalar_t *data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>(); const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>(); scalar_t *data_col_ = data_col.data_ptr<scalar_t>(); modulated_deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kenerl_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, channels, deformable_group, height_col, width_col, data_col_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in modulated_deformable_im2col_cuda: %s\n", cudaGetErrorString(err)); } } void modulated_deformable_col2im_cuda( const at::Tensor data_col, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor grad_im) { const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "modulated_deformable_col2im_gpu", ([&] { const scalar_t *data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>(); const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>(); scalar_t *grad_im_ = grad_im.data_ptr<scalar_t>(); modulated_deformable_col2im_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( num_kernels, data_col_, data_offset_, data_mask_, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, deformable_group, height_col, width_col, grad_im_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in modulated_deformable_col2im_cuda: %s\n", cudaGetErrorString(err)); } } void modulated_deformable_col2im_coord_cuda( const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor grad_offset, at::Tensor grad_mask) { const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group; const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "modulated_deformable_col2im_coord_gpu", ([&] { const scalar_t *data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t *data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>(); const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>(); scalar_t *grad_offset_ = 
grad_offset.data_ptr<scalar_t>(); scalar_t *grad_mask_ = grad_mask.data_ptr<scalar_t>(); modulated_deformable_col2im_coord_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( num_kernels, data_col_, data_im_, data_offset_, data_mask_, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col, grad_offset_, grad_mask_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in modulated_deformable_col2im_coord_cuda: %s\n", cudaGetErrorString(err)); } }
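Taken together, the three `modulated_deformable_*_cuda` wrappers above are the building blocks of a DCNv2 forward and backward pass. Below is a minimal forward-pass sketch, not the original host wrapper: the column-buffer shape follows from the kernel's `((c_col * batch_size + b) * height_col + h) * width_col + w` addressing, while the tensor names, the single stride/pad/dilation scalars, and the per-sample `matmul` loop are assumptions made for illustration.

// Sketch only: chains modulated_deformable_im2col_cuda (declared above) with a
// per-sample GEMM against the flattened weight. Shapes and names are assumed.
#include <ATen/ATen.h>

at::Tensor modulated_deform_conv_forward_sketch(
    const at::Tensor& input,   // (B, C_in, H, W)
    const at::Tensor& weight,  // (C_out, C_in, kH, kW)
    const at::Tensor& offset,  // (B, 2*dg*kH*kW, H_out, W_out)
    const at::Tensor& mask,    // (B, dg*kH*kW, H_out, W_out)
    int stride, int pad, int dilation, int deformable_group) {
  const int B = input.size(0), C_in = input.size(1);
  const int H = input.size(2), W = input.size(3);
  const int C_out = weight.size(0), kH = weight.size(2), kW = weight.size(3);
  const int H_out = (H + 2 * pad - (dilation * (kH - 1) + 1)) / stride + 1;
  const int W_out = (W + 2 * pad - (dilation * (kW - 1) + 1)) / stride + 1;

  // Column buffer laid out as (C_in*kH*kW, B, H_out*W_out), matching the
  // im2col kernel's output indexing.
  auto columns = at::zeros({C_in * kH * kW, B, H_out * W_out}, input.options());

  modulated_deformable_im2col_cuda(
      input, offset, mask, B, C_in, H, W, H_out, W_out,
      kH, kW, pad, pad, stride, stride, dilation, dilation,
      deformable_group, columns);

  // (C_out, C_in*kH*kW) x (C_in*kH*kW, H_out*W_out) for each sample.
  auto weight_2d = weight.reshape({C_out, C_in * kH * kW});
  auto output = at::empty({B, C_out, H_out, W_out}, input.options());
  for (int b = 0; b < B; ++b) {
    output.select(0, b).copy_(
        weight_2d.matmul(columns.select(1, b)).reshape({C_out, H_out, W_out}));
  }
  return output;
}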
4bbc1418dd3020c13f6c09369139bafee454bb48.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Matrix multiply device code #include <assert.h> #include <math.h> #include "utils.h" #include "types.h" using namespace std; __global__ void matMul(int N, _DOUBLE_ *C, _DOUBLE_ *A, _DOUBLE_ *B) { register const unsigned int TW = 32; register const unsigned int Tw[8] = {0,TW/8, TW/4, 3*TW/8, TW/2, 5*TW/8, 3*TW/4, 7*TW/8}; // Array used to do unrolling. const unsigned int edge_limit = N/TW; register const unsigned int ty = threadIdx.y, tx = threadIdx.x; register const unsigned int by = blockIdx.y, bx = blockIdx.x; register const unsigned int I = by*TW + ty, J = bx*TW + tx; __shared__ _DOUBLE_ As[TW][TW], Bs[TW][TW]; // Shared memory used to store the tiles of A and B register _DOUBLE_ Cs[8] = {0}; // Array used to store the computed value of C. register int i = 0; register int kk = 0; // This section of the code handles the initialising of the As and Bs to 0. #pragma unroll for(i = 0; i < 8; i++) { As[ty+Tw[i]][tx] = 0; } #pragma unroll for(i = 0; i < 8; i++) { Bs[ty+Tw[i]][tx] = 0; } // This section of the code handles the corner cases. if(N%TW !=0) { register int a_1 = ty - I; register int a_2 = edge_limit*TW+tx; for(int Ai =I; (Ai < N) && (Ai < I + TW); Ai +=Tw[1]) { if(edge_limit*TW+tx < N) As[a_1+Ai][tx] = A[(Ai)*N+a_2]; } register int b_start = edge_limit*TW+ty; register int b_1 = edge_limit*TW; for(int Bi = b_start;(Bi<N) && (Bi < b_start+TW) ; Bi+=Tw[1]) { if(J < N) Bs[Bi - b_1][tx] = B[Bi*N+J]; } __syncthreads(); // This section handles the computation of Cs for(int k = 0; k < TW; k++) { #pragma unroll for(i = 0 ;i < 8; i++) { Cs[i] += As[ty+Tw[i]][k] * Bs[k][tx]; } } } __syncthreads(); // This seciton of the code is for the perfect case without corner cases. for( kk = 0; kk < edge_limit; kk++) { // Loading of A into As and B into Bs #pragma unroll for(i = 0; i < 8; i++) { As[ty+Tw[i]][tx] = A[(I+Tw[i])*N+(kk*TW+tx)]; } register int B_1 = kk*TW+ty; #pragma unroll for(i = 0; i < 8; i++) { Bs[ty+Tw[i]][tx] = B[(B_1+Tw[i])*N+J]; } __syncthreads(); // Computing the values of C for(int k = 0; k < TW; k++) { #pragma unroll for(i = 0; i < 8; i++) { Cs[i] += As[ty+Tw[i]][k] * Bs[k][tx]; } } __syncthreads(); } //Storing the values of Cs to C while checking the indices within range if((I<N)&&(J<N)) { C[I*N+J] = Cs[0]; if(I+Tw[1] < N) C[(I+Tw[1])*N+J] = Cs[1]; if(I+Tw[2] < N) C[(I+Tw[2])*N+J] = Cs[2]; if(I+Tw[3] < N) C[(I+Tw[3])*N+J] = Cs[3]; if(I+Tw[4] < N) C[(I+Tw[4])*N+J] = Cs[4]; if(I+Tw[5] < N) C[(I+Tw[5])*N+J] = Cs[5]; if(I+Tw[6] < N) C[(I+Tw[6])*N+J] = Cs[6]; if(I+Tw[7] < N) C[(I+Tw[7])*N+J] = Cs[7]; } }
4bbc1418dd3020c13f6c09369139bafee454bb48.cu
// Matrix multiply device code #include <assert.h> #include <math.h> #include "utils.h" #include "types.h" using namespace std; __global__ void matMul(int N, _DOUBLE_ *C, _DOUBLE_ *A, _DOUBLE_ *B) { register const unsigned int TW = 32; register const unsigned int Tw[8] = {0,TW/8, TW/4, 3*TW/8, TW/2, 5*TW/8, 3*TW/4, 7*TW/8}; // Array used to do unrolling. const unsigned int edge_limit = N/TW; register const unsigned int ty = threadIdx.y, tx = threadIdx.x; register const unsigned int by = blockIdx.y, bx = blockIdx.x; register const unsigned int I = by*TW + ty, J = bx*TW + tx; __shared__ _DOUBLE_ As[TW][TW], Bs[TW][TW]; // Shared memory used to store the tiles of A and B register _DOUBLE_ Cs[8] = {0}; // Array used to store the computed value of C. register int i = 0; register int kk = 0; // This section of the code handles the initialising of the As and Bs to 0. #pragma unroll for(i = 0; i < 8; i++) { As[ty+Tw[i]][tx] = 0; } #pragma unroll for(i = 0; i < 8; i++) { Bs[ty+Tw[i]][tx] = 0; } // This section of the code handles the corner cases. if(N%TW !=0) { register int a_1 = ty - I; register int a_2 = edge_limit*TW+tx; for(int Ai =I; (Ai < N) && (Ai < I + TW); Ai +=Tw[1]) { if(edge_limit*TW+tx < N) As[a_1+Ai][tx] = A[(Ai)*N+a_2]; } register int b_start = edge_limit*TW+ty; register int b_1 = edge_limit*TW; for(int Bi = b_start;(Bi<N) && (Bi < b_start+TW) ; Bi+=Tw[1]) { if(J < N) Bs[Bi - b_1][tx] = B[Bi*N+J]; } __syncthreads(); // This section handles the computation of Cs for(int k = 0; k < TW; k++) { #pragma unroll for(i = 0 ;i < 8; i++) { Cs[i] += As[ty+Tw[i]][k] * Bs[k][tx]; } } } __syncthreads(); // This seciton of the code is for the perfect case without corner cases. for( kk = 0; kk < edge_limit; kk++) { // Loading of A into As and B into Bs #pragma unroll for(i = 0; i < 8; i++) { As[ty+Tw[i]][tx] = A[(I+Tw[i])*N+(kk*TW+tx)]; } register int B_1 = kk*TW+ty; #pragma unroll for(i = 0; i < 8; i++) { Bs[ty+Tw[i]][tx] = B[(B_1+Tw[i])*N+J]; } __syncthreads(); // Computing the values of C for(int k = 0; k < TW; k++) { #pragma unroll for(i = 0; i < 8; i++) { Cs[i] += As[ty+Tw[i]][k] * Bs[k][tx]; } } __syncthreads(); } //Storing the values of Cs to C while checking the indices within range if((I<N)&&(J<N)) { C[I*N+J] = Cs[0]; if(I+Tw[1] < N) C[(I+Tw[1])*N+J] = Cs[1]; if(I+Tw[2] < N) C[(I+Tw[2])*N+J] = Cs[2]; if(I+Tw[3] < N) C[(I+Tw[3])*N+J] = Cs[3]; if(I+Tw[4] < N) C[(I+Tw[4])*N+J] = Cs[4]; if(I+Tw[5] < N) C[(I+Tw[5])*N+J] = Cs[5]; if(I+Tw[6] < N) C[(I+Tw[6])*N+J] = Cs[6]; if(I+Tw[7] < N) C[(I+Tw[7])*N+J] = Cs[7]; } }
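The kernel above computes eight rows of C per thread within a 32x32 tile (the `Tw` offsets step by TW/8), so the expected launch shape is a 32x4 block over an N/32 by N/32 grid. A plausible host-side launch is sketched below, written as if appended to the same file so that `matMul` and `_DOUBLE_` are already in scope; the real harness lives in `utils.h`/`types.h`, which are not shown, and the wrapper name is made up here.

// Plausible launch configuration for the tiled matMul kernel above (sketch only).
#include <cstdio>  // for the error printout below

void launchMatMul(int N, _DOUBLE_* d_C, _DOUBLE_* d_A, _DOUBLE_* d_B) {
  const unsigned int TW = 32;                       // matches the kernel's tile width
  dim3 threads(TW, TW / 8);                         // 32 x 4: each thread owns 8 C-rows
  dim3 blocks((N + TW - 1) / TW, (N + TW - 1) / TW);
  matMul<<<blocks, threads>>>(N, d_C, d_A, d_B);
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess)
    printf("error launching matMul: %s\n", cudaGetErrorString(err));
}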
73cd1fd21e14848a4baa59cc8bc9bbc7bc528e28.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include <faiss/gpu/impl/Distance.cuh> #include <faiss/gpu/impl/BroadcastSum.cuh> #include <faiss/gpu/impl/L2Norm.cuh> #include <faiss/gpu/impl/L2Select.cuh> #include <faiss/impl/FaissAssert.h> #include <faiss/impl/AuxIndexStructures.h> #include <faiss/gpu/GpuResources.h> #include <faiss/gpu/utils/DeviceDefs.cuh> #include <faiss/gpu/utils/DeviceUtils.h> #include <faiss/gpu/utils/Limits.cuh> #include <faiss/gpu/utils/MatrixMult.cuh> #include <faiss/gpu/utils/BlockSelectKernel.cuh> #include <memory> #include <algorithm> #include <thrust/fill.h> #include <thrust/for_each.h> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> namespace faiss { namespace gpu { namespace { template <typename T> Tensor<T, 2, true> sliceCentroids(Tensor<T, 2, true>& centroids, bool centroidsRowMajor, int startCentroid, int num) { // Row major is (num, dim) // Col major is (dim, num) if (startCentroid == 0 && num == centroids.getSize(centroidsRowMajor ? 0 : 1)) { return centroids; } return centroids.narrow(centroidsRowMajor ? 0 : 1, startCentroid, num); } // For each chunk of k indices, increment the index by chunk * increment template <typename T> __global__ void incrementIndex(Tensor<T, 2, true> indices, int k, int increment) { for (int i = threadIdx.x; i < k; i += blockDim.x) { indices[blockIdx.y][blockIdx.x * k + i] += blockIdx.x * increment; } } // Used to update result indices in distance computation where the number of // centroids is high, and is tiled template <typename T> void runIncrementIndex(Tensor<T, 2, true>& indices, int k, int increment, hipStream_t stream) { dim3 grid(indices.getSize(1) / k, indices.getSize(0)); int block = ::min(k, 512); // should be exact FAISS_ASSERT(grid.x * k == indices.getSize(1)); hipLaunchKernelGGL(( incrementIndex), dim3(grid), dim3(block), 0, stream, indices, k, increment); hipDeviceSynchronize(); } // If the inner size (dim) of the vectors is small, we want a larger query tile // size, like 1024 void chooseTileSize(int numQueries, int numCentroids, int dim, int elementSize, size_t tempMemAvailable, int& tileRows, int& tileCols) { // The matrix multiplication should be large enough to be efficient, but if it // is too large, we seem to lose efficiency as opposed to double-streaming. // Each tile size here defines 1/2 of the memory use due to double streaming. // We ignore available temporary memory, as that is adjusted independently by // the user and can thus meet these requirements (or not). // For <= 4 GB GPUs, prefer 512 MB of usage. // For <= 8 GB GPUs, prefer 768 MB of usage. // Otherwise, prefer 1 GB of usage. auto totalMem = getCurrentDeviceProperties().totalGlobalMem; int targetUsage = 0; if (totalMem <= ((size_t) 4) * 1024 * 1024 * 1024) { targetUsage = 512 * 1024 * 1024; } else if (totalMem <= ((size_t) 8) * 1024 * 1024 * 1024) { targetUsage = 768 * 1024 * 1024; } else { targetUsage = 1024 * 1024 * 1024; } targetUsage /= 2 * elementSize; // 512 seems to be a batch size sweetspot for float32. // If we are on float16, increase to 512. // If the k size (vec dim) of the matrix multiplication is small (<= 32), // increase to 1024. 
int preferredTileRows = 512; if (dim <= 32) { preferredTileRows = 1024; } tileRows = ::min(preferredTileRows, numQueries); // tileCols is the remainder size tileCols = ::min(targetUsage / preferredTileRows, numCentroids); } } template <typename T> void runDistance(bool computeL2, GpuResources* resources, Tensor<T, 2, true>& centroids, bool centroidsRowMajor, Tensor<T, 1, true>* centroidNorms, Tensor<T, 2, true>& queries, bool queriesRowMajor, Tensor<uint8_t, 1, true>& bitset, int k, Tensor<T, 2, true>& outDistances, Tensor<int, 2, true>& outIndices, bool useHgemm, bool ignoreOutDistances) { // The # of centroids in `centroids` based on memory layout auto numCentroids = centroids.getSize(centroidsRowMajor ? 0 : 1); // The # of queries in `queries` based on memory layout auto numQueries = queries.getSize(queriesRowMajor ? 0 : 1); // The dimensions of the vectors to consider auto dim = queries.getSize(queriesRowMajor ? 1 : 0); FAISS_ASSERT((numQueries == 0 || numCentroids == 0) || dim == centroids.getSize(centroidsRowMajor ? 1 : 0)); FAISS_ASSERT(outDistances.getSize(0) == numQueries); FAISS_ASSERT(outIndices.getSize(0) == numQueries); FAISS_ASSERT(outDistances.getSize(1) == k); FAISS_ASSERT(outIndices.getSize(1) == k); auto& mem = resources->getMemoryManagerCurrentDevice(); auto defaultStream = resources->getDefaultStreamCurrentDevice(); // If we're quering against a 0 sized set, just return empty results if (centroids.numElements() == 0) { thrust::fill(thrust::hip::par.on(defaultStream), outDistances.data(), outDistances.end(), Limits<T>::getMax()); thrust::fill(thrust::hip::par.on(defaultStream), outIndices.data(), outIndices.end(), -1); return; } // L2: If ||c||^2 is not pre-computed, calculate it DeviceTensor<T, 1, true> cNorms; if (computeL2 && !centroidNorms) { cNorms = std::move(DeviceTensor<T, 1, true>(mem, {numCentroids}, defaultStream)); runL2Norm(centroids, centroidsRowMajor, cNorms, true, defaultStream); centroidNorms = &cNorms; } // // Prepare norm vector ||q||^2; ||c||^2 is already pre-computed // int qNormSize[1] = {numQueries}; DeviceTensor<T, 1, true> queryNorms(mem, qNormSize, defaultStream); // ||q||^2 if (computeL2) { runL2Norm(queries, queriesRowMajor, queryNorms, true, defaultStream); } // By default, aim to use up to 512 MB of memory for the processing, with both // number of queries and number of centroids being at least 512. 
int tileRows = 0; int tileCols = 0; chooseTileSize(numQueries, numCentroids, dim, sizeof(T), mem.getSizeAvailable(), tileRows, tileCols); int numColTiles = utils::divUp(numCentroids, tileCols); // We can have any number of vectors to query against, even less than k, in // which case we'll return -1 for the index FAISS_ASSERT(k <= GPU_MAX_SELECTION_K); // select limitation // Temporary output memory space we'll use DeviceTensor<T, 2, true> distanceBuf1( mem, {tileRows, tileCols}, defaultStream); DeviceTensor<T, 2, true> distanceBuf2( mem, {tileRows, tileCols}, defaultStream); DeviceTensor<T, 2, true>* distanceBufs[2] = {&distanceBuf1, &distanceBuf2}; DeviceTensor<T, 2, true> outDistanceBuf1( mem, {tileRows, numColTiles * k}, defaultStream); DeviceTensor<T, 2, true> outDistanceBuf2( mem, {tileRows, numColTiles * k}, defaultStream); DeviceTensor<T, 2, true>* outDistanceBufs[2] = {&outDistanceBuf1, &outDistanceBuf2}; DeviceTensor<int, 2, true> outIndexBuf1( mem, {tileRows, numColTiles * k}, defaultStream); DeviceTensor<int, 2, true> outIndexBuf2( mem, {tileRows, numColTiles * k}, defaultStream); DeviceTensor<int, 2, true>* outIndexBufs[2] = {&outIndexBuf1, &outIndexBuf2}; auto streams = resources->getAlternateStreamsCurrentDevice(); streamWait(streams, {defaultStream}); int curStream = 0; bool interrupt = false; // Tile over the input queries for (int i = 0; i < numQueries; i += tileRows) { if (interrupt || InterruptCallback::is_interrupted()) { interrupt = true; break; } int curQuerySize = ::min(tileRows, numQueries - i); auto outDistanceView = outDistances.narrow(0, i, curQuerySize); auto outIndexView = outIndices.narrow(0, i, curQuerySize); auto queryView = queries.narrow(queriesRowMajor ? 0 : 1, i, curQuerySize); auto queryNormNiew = queryNorms.narrow(0, i, curQuerySize); auto outDistanceBufRowView = outDistanceBufs[curStream]->narrow(0, 0, curQuerySize); auto outIndexBufRowView = outIndexBufs[curStream]->narrow(0, 0, curQuerySize); // Tile over the centroids for (int j = 0; j < numCentroids; j += tileCols) { if (InterruptCallback::is_interrupted()) { interrupt = true; break; } int curCentroidSize = ::min(tileCols, numCentroids - j); int curColTile = j / tileCols; auto centroidsView = sliceCentroids(centroids, centroidsRowMajor, j, curCentroidSize); auto distanceBufView = distanceBufs[curStream]-> narrow(0, 0, curQuerySize).narrow(1, 0, curCentroidSize); auto outDistanceBufColView = outDistanceBufRowView.narrow(1, k * curColTile, k); auto outIndexBufColView = outIndexBufRowView.narrow(1, k * curColTile, k); // L2: distance is ||c||^2 - 2qc + ||q||^2, we compute -2qc // IP: just compute qc // (query id x dim) x (centroid id, dim)' = (query id, centroid id) runMatrixMult(distanceBufView, false, // not transposed queryView, !queriesRowMajor, // transposed MM if col major centroidsView, centroidsRowMajor, // transposed MM if row major computeL2 ? 
-2.0f : 1.0f, 0.0f, useHgemm, resources->getBlasHandleCurrentDevice(), streams[curStream]); if (computeL2) { // For L2 distance, we use this fused kernel that performs both // adding ||c||^2 to -2qc and k-selection, so we only need two // passes (one write by the gemm, one read here) over the huge // region of output memory // // If we aren't tiling along the number of centroids, we can perform the // output work directly if (tileCols == numCentroids) { // Write into the final output runL2SelectMin(distanceBufView, *centroidNorms, bitset, outDistanceView, outIndexView, k, streams[curStream]); if (!ignoreOutDistances) { // expand (query id) to (query id, k) by duplicating along rows // top-k ||c||^2 - 2qc + ||q||^2 in the form (query id, k) runSumAlongRows(queryNormNiew, outDistanceView, true, // L2 distances should not go below zero due // to roundoff error streams[curStream]); } } else { auto centroidNormsView = centroidNorms->narrow(0, j, curCentroidSize); // Write into our intermediate output runL2SelectMin(distanceBufView, centroidNormsView, bitset, outDistanceBufColView, outIndexBufColView, k, streams[curStream]); if (!ignoreOutDistances) { // expand (query id) to (query id, k) by duplicating along rows // top-k ||c||^2 - 2qc + ||q||^2 in the form (query id, k) runSumAlongRows(queryNormNiew, outDistanceBufColView, true, // L2 distances should not go below zero due // to roundoff error streams[curStream]); } } } else { // For IP, just k-select the output for this tile if (tileCols == numCentroids) { // Write into the final output runBlockSelect(distanceBufView, bitset, outDistanceView, outIndexView, true, k, streams[curStream]); } else { // Write into the intermediate output runBlockSelect(distanceBufView, bitset, outDistanceBufColView, outIndexBufColView, true, k, streams[curStream]); } } } // As we're finished with processing a full set of centroids, perform the // final k-selection if (tileCols != numCentroids) { // The indices are tile-relative; for each tile of k, we need to add // tileCols to the index runIncrementIndex(outIndexBufRowView, k, tileCols, streams[curStream]); runBlockSelectPair(outDistanceBufRowView, outIndexBufRowView, bitset, outDistanceView, outIndexView, computeL2 ? 
false : true, k, streams[curStream]); } curStream = (curStream + 1) % 2; } // Have the desired ordering stream wait on the multi-stream streamWait({defaultStream}, streams); if (interrupt) { FAISS_THROW_MSG("interrupted"); } } // Bitset added template <typename T> void runL2Distance(GpuResources* resources, Tensor<T, 2, true>& centroids, bool centroidsRowMajor, Tensor<T, 1, true>* centroidNorms, Tensor<T, 2, true>& queries, bool queriesRowMajor, Tensor<uint8_t, 1, true>& bitset, int k, Tensor<T, 2, true>& outDistances, Tensor<int, 2, true>& outIndices, bool useHgemm, bool ignoreOutDistances = false) { runDistance<T>(true, // L2 resources, centroids, centroidsRowMajor, centroidNorms, queries, queriesRowMajor, bitset, k, outDistances, outIndices, useHgemm, ignoreOutDistances); } template <typename T> void runIPDistance(GpuResources* resources, Tensor<T, 2, true>& centroids, bool centroidsRowMajor, Tensor<T, 2, true>& queries, bool queriesRowMajor, Tensor<uint8_t, 1, true>& bitset, int k, Tensor<T, 2, true>& outDistances, Tensor<int, 2, true>& outIndices, bool useHgemm) { runDistance<T>(false, // IP resources, centroids, centroidsRowMajor, nullptr, // no centroid norms provided queries, queriesRowMajor, bitset, k, outDistances, outIndices, useHgemm, false); } // // Instantiations of the distance templates // void runIPDistance(GpuResources* resources, Tensor<float, 2, true>& vectors, bool vectorsRowMajor, Tensor<float, 2, true>& queries, bool queriesRowMajor, Tensor<uint8_t, 1, true>& bitset, int k, Tensor<float, 2, true>& outDistances, Tensor<int, 2, true>& outIndices) { runIPDistance<float>(resources, vectors, vectorsRowMajor, queries, queriesRowMajor, bitset, k, outDistances, outIndices, false); } void runIPDistance(GpuResources* resources, Tensor<half, 2, true>& vectors, bool vectorsRowMajor, Tensor<half, 2, true>& queries, bool queriesRowMajor, Tensor<uint8_t, 1, true>& bitset, int k, Tensor<half, 2, true>& outDistances, Tensor<int, 2, true>& outIndices, bool useHgemm) { runIPDistance<half>(resources, vectors, vectorsRowMajor, queries, queriesRowMajor, bitset, k, outDistances, outIndices, useHgemm); } void runL2Distance(GpuResources* resources, Tensor<float, 2, true>& vectors, bool vectorsRowMajor, Tensor<float, 1, true>* vectorNorms, Tensor<float, 2, true>& queries, bool queriesRowMajor, Tensor<uint8_t, 1, true>& bitset, int k, Tensor<float, 2, true>& outDistances, Tensor<int, 2, true>& outIndices, bool ignoreOutDistances) { runL2Distance<float>(resources, vectors, vectorsRowMajor, vectorNorms, queries, queriesRowMajor, bitset, k, outDistances, outIndices, false, ignoreOutDistances); } void runL2Distance(GpuResources* resources, Tensor<half, 2, true>& vectors, bool vectorsRowMajor, Tensor<half, 1, true>* vectorNorms, Tensor<half, 2, true>& queries, bool queriesRowMajor, Tensor<uint8_t, 1, true>& bitset, int k, Tensor<half, 2, true>& outDistances, Tensor<int, 2, true>& outIndices, bool useHgemm, bool ignoreOutDistances) { runL2Distance<half>(resources, vectors, vectorsRowMajor, vectorNorms, queries, queriesRowMajor, bitset, k, outDistances, outIndices, useHgemm, ignoreOutDistances); } } } // namespace
73cd1fd21e14848a4baa59cc8bc9bbc7bc528e28.cu
/** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include <faiss/gpu/impl/Distance.cuh> #include <faiss/gpu/impl/BroadcastSum.cuh> #include <faiss/gpu/impl/L2Norm.cuh> #include <faiss/gpu/impl/L2Select.cuh> #include <faiss/impl/FaissAssert.h> #include <faiss/impl/AuxIndexStructures.h> #include <faiss/gpu/GpuResources.h> #include <faiss/gpu/utils/DeviceDefs.cuh> #include <faiss/gpu/utils/DeviceUtils.h> #include <faiss/gpu/utils/Limits.cuh> #include <faiss/gpu/utils/MatrixMult.cuh> #include <faiss/gpu/utils/BlockSelectKernel.cuh> #include <memory> #include <algorithm> #include <thrust/fill.h> #include <thrust/for_each.h> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> namespace faiss { namespace gpu { namespace { template <typename T> Tensor<T, 2, true> sliceCentroids(Tensor<T, 2, true>& centroids, bool centroidsRowMajor, int startCentroid, int num) { // Row major is (num, dim) // Col major is (dim, num) if (startCentroid == 0 && num == centroids.getSize(centroidsRowMajor ? 0 : 1)) { return centroids; } return centroids.narrow(centroidsRowMajor ? 0 : 1, startCentroid, num); } // For each chunk of k indices, increment the index by chunk * increment template <typename T> __global__ void incrementIndex(Tensor<T, 2, true> indices, int k, int increment) { for (int i = threadIdx.x; i < k; i += blockDim.x) { indices[blockIdx.y][blockIdx.x * k + i] += blockIdx.x * increment; } } // Used to update result indices in distance computation where the number of // centroids is high, and is tiled template <typename T> void runIncrementIndex(Tensor<T, 2, true>& indices, int k, int increment, cudaStream_t stream) { dim3 grid(indices.getSize(1) / k, indices.getSize(0)); int block = std::min(k, 512); // should be exact FAISS_ASSERT(grid.x * k == indices.getSize(1)); incrementIndex<<<grid, block, 0, stream>>>(indices, k, increment); cudaDeviceSynchronize(); } // If the inner size (dim) of the vectors is small, we want a larger query tile // size, like 1024 void chooseTileSize(int numQueries, int numCentroids, int dim, int elementSize, size_t tempMemAvailable, int& tileRows, int& tileCols) { // The matrix multiplication should be large enough to be efficient, but if it // is too large, we seem to lose efficiency as opposed to double-streaming. // Each tile size here defines 1/2 of the memory use due to double streaming. // We ignore available temporary memory, as that is adjusted independently by // the user and can thus meet these requirements (or not). // For <= 4 GB GPUs, prefer 512 MB of usage. // For <= 8 GB GPUs, prefer 768 MB of usage. // Otherwise, prefer 1 GB of usage. auto totalMem = getCurrentDeviceProperties().totalGlobalMem; int targetUsage = 0; if (totalMem <= ((size_t) 4) * 1024 * 1024 * 1024) { targetUsage = 512 * 1024 * 1024; } else if (totalMem <= ((size_t) 8) * 1024 * 1024 * 1024) { targetUsage = 768 * 1024 * 1024; } else { targetUsage = 1024 * 1024 * 1024; } targetUsage /= 2 * elementSize; // 512 seems to be a batch size sweetspot for float32. // If we are on float16, increase to 512. // If the k size (vec dim) of the matrix multiplication is small (<= 32), // increase to 1024. 
int preferredTileRows = 512; if (dim <= 32) { preferredTileRows = 1024; } tileRows = std::min(preferredTileRows, numQueries); // tileCols is the remainder size tileCols = std::min(targetUsage / preferredTileRows, numCentroids); } } template <typename T> void runDistance(bool computeL2, GpuResources* resources, Tensor<T, 2, true>& centroids, bool centroidsRowMajor, Tensor<T, 1, true>* centroidNorms, Tensor<T, 2, true>& queries, bool queriesRowMajor, Tensor<uint8_t, 1, true>& bitset, int k, Tensor<T, 2, true>& outDistances, Tensor<int, 2, true>& outIndices, bool useHgemm, bool ignoreOutDistances) { // The # of centroids in `centroids` based on memory layout auto numCentroids = centroids.getSize(centroidsRowMajor ? 0 : 1); // The # of queries in `queries` based on memory layout auto numQueries = queries.getSize(queriesRowMajor ? 0 : 1); // The dimensions of the vectors to consider auto dim = queries.getSize(queriesRowMajor ? 1 : 0); FAISS_ASSERT((numQueries == 0 || numCentroids == 0) || dim == centroids.getSize(centroidsRowMajor ? 1 : 0)); FAISS_ASSERT(outDistances.getSize(0) == numQueries); FAISS_ASSERT(outIndices.getSize(0) == numQueries); FAISS_ASSERT(outDistances.getSize(1) == k); FAISS_ASSERT(outIndices.getSize(1) == k); auto& mem = resources->getMemoryManagerCurrentDevice(); auto defaultStream = resources->getDefaultStreamCurrentDevice(); // If we're quering against a 0 sized set, just return empty results if (centroids.numElements() == 0) { thrust::fill(thrust::cuda::par.on(defaultStream), outDistances.data(), outDistances.end(), Limits<T>::getMax()); thrust::fill(thrust::cuda::par.on(defaultStream), outIndices.data(), outIndices.end(), -1); return; } // L2: If ||c||^2 is not pre-computed, calculate it DeviceTensor<T, 1, true> cNorms; if (computeL2 && !centroidNorms) { cNorms = std::move(DeviceTensor<T, 1, true>(mem, {numCentroids}, defaultStream)); runL2Norm(centroids, centroidsRowMajor, cNorms, true, defaultStream); centroidNorms = &cNorms; } // // Prepare norm vector ||q||^2; ||c||^2 is already pre-computed // int qNormSize[1] = {numQueries}; DeviceTensor<T, 1, true> queryNorms(mem, qNormSize, defaultStream); // ||q||^2 if (computeL2) { runL2Norm(queries, queriesRowMajor, queryNorms, true, defaultStream); } // By default, aim to use up to 512 MB of memory for the processing, with both // number of queries and number of centroids being at least 512. 
int tileRows = 0; int tileCols = 0; chooseTileSize(numQueries, numCentroids, dim, sizeof(T), mem.getSizeAvailable(), tileRows, tileCols); int numColTiles = utils::divUp(numCentroids, tileCols); // We can have any number of vectors to query against, even less than k, in // which case we'll return -1 for the index FAISS_ASSERT(k <= GPU_MAX_SELECTION_K); // select limitation // Temporary output memory space we'll use DeviceTensor<T, 2, true> distanceBuf1( mem, {tileRows, tileCols}, defaultStream); DeviceTensor<T, 2, true> distanceBuf2( mem, {tileRows, tileCols}, defaultStream); DeviceTensor<T, 2, true>* distanceBufs[2] = {&distanceBuf1, &distanceBuf2}; DeviceTensor<T, 2, true> outDistanceBuf1( mem, {tileRows, numColTiles * k}, defaultStream); DeviceTensor<T, 2, true> outDistanceBuf2( mem, {tileRows, numColTiles * k}, defaultStream); DeviceTensor<T, 2, true>* outDistanceBufs[2] = {&outDistanceBuf1, &outDistanceBuf2}; DeviceTensor<int, 2, true> outIndexBuf1( mem, {tileRows, numColTiles * k}, defaultStream); DeviceTensor<int, 2, true> outIndexBuf2( mem, {tileRows, numColTiles * k}, defaultStream); DeviceTensor<int, 2, true>* outIndexBufs[2] = {&outIndexBuf1, &outIndexBuf2}; auto streams = resources->getAlternateStreamsCurrentDevice(); streamWait(streams, {defaultStream}); int curStream = 0; bool interrupt = false; // Tile over the input queries for (int i = 0; i < numQueries; i += tileRows) { if (interrupt || InterruptCallback::is_interrupted()) { interrupt = true; break; } int curQuerySize = std::min(tileRows, numQueries - i); auto outDistanceView = outDistances.narrow(0, i, curQuerySize); auto outIndexView = outIndices.narrow(0, i, curQuerySize); auto queryView = queries.narrow(queriesRowMajor ? 0 : 1, i, curQuerySize); auto queryNormNiew = queryNorms.narrow(0, i, curQuerySize); auto outDistanceBufRowView = outDistanceBufs[curStream]->narrow(0, 0, curQuerySize); auto outIndexBufRowView = outIndexBufs[curStream]->narrow(0, 0, curQuerySize); // Tile over the centroids for (int j = 0; j < numCentroids; j += tileCols) { if (InterruptCallback::is_interrupted()) { interrupt = true; break; } int curCentroidSize = std::min(tileCols, numCentroids - j); int curColTile = j / tileCols; auto centroidsView = sliceCentroids(centroids, centroidsRowMajor, j, curCentroidSize); auto distanceBufView = distanceBufs[curStream]-> narrow(0, 0, curQuerySize).narrow(1, 0, curCentroidSize); auto outDistanceBufColView = outDistanceBufRowView.narrow(1, k * curColTile, k); auto outIndexBufColView = outIndexBufRowView.narrow(1, k * curColTile, k); // L2: distance is ||c||^2 - 2qc + ||q||^2, we compute -2qc // IP: just compute qc // (query id x dim) x (centroid id, dim)' = (query id, centroid id) runMatrixMult(distanceBufView, false, // not transposed queryView, !queriesRowMajor, // transposed MM if col major centroidsView, centroidsRowMajor, // transposed MM if row major computeL2 ? 
-2.0f : 1.0f, 0.0f, useHgemm, resources->getBlasHandleCurrentDevice(), streams[curStream]); if (computeL2) { // For L2 distance, we use this fused kernel that performs both // adding ||c||^2 to -2qc and k-selection, so we only need two // passes (one write by the gemm, one read here) over the huge // region of output memory // // If we aren't tiling along the number of centroids, we can perform the // output work directly if (tileCols == numCentroids) { // Write into the final output runL2SelectMin(distanceBufView, *centroidNorms, bitset, outDistanceView, outIndexView, k, streams[curStream]); if (!ignoreOutDistances) { // expand (query id) to (query id, k) by duplicating along rows // top-k ||c||^2 - 2qc + ||q||^2 in the form (query id, k) runSumAlongRows(queryNormNiew, outDistanceView, true, // L2 distances should not go below zero due // to roundoff error streams[curStream]); } } else { auto centroidNormsView = centroidNorms->narrow(0, j, curCentroidSize); // Write into our intermediate output runL2SelectMin(distanceBufView, centroidNormsView, bitset, outDistanceBufColView, outIndexBufColView, k, streams[curStream]); if (!ignoreOutDistances) { // expand (query id) to (query id, k) by duplicating along rows // top-k ||c||^2 - 2qc + ||q||^2 in the form (query id, k) runSumAlongRows(queryNormNiew, outDistanceBufColView, true, // L2 distances should not go below zero due // to roundoff error streams[curStream]); } } } else { // For IP, just k-select the output for this tile if (tileCols == numCentroids) { // Write into the final output runBlockSelect(distanceBufView, bitset, outDistanceView, outIndexView, true, k, streams[curStream]); } else { // Write into the intermediate output runBlockSelect(distanceBufView, bitset, outDistanceBufColView, outIndexBufColView, true, k, streams[curStream]); } } } // As we're finished with processing a full set of centroids, perform the // final k-selection if (tileCols != numCentroids) { // The indices are tile-relative; for each tile of k, we need to add // tileCols to the index runIncrementIndex(outIndexBufRowView, k, tileCols, streams[curStream]); runBlockSelectPair(outDistanceBufRowView, outIndexBufRowView, bitset, outDistanceView, outIndexView, computeL2 ? 
false : true, k, streams[curStream]); } curStream = (curStream + 1) % 2; } // Have the desired ordering stream wait on the multi-stream streamWait({defaultStream}, streams); if (interrupt) { FAISS_THROW_MSG("interrupted"); } } // Bitset added template <typename T> void runL2Distance(GpuResources* resources, Tensor<T, 2, true>& centroids, bool centroidsRowMajor, Tensor<T, 1, true>* centroidNorms, Tensor<T, 2, true>& queries, bool queriesRowMajor, Tensor<uint8_t, 1, true>& bitset, int k, Tensor<T, 2, true>& outDistances, Tensor<int, 2, true>& outIndices, bool useHgemm, bool ignoreOutDistances = false) { runDistance<T>(true, // L2 resources, centroids, centroidsRowMajor, centroidNorms, queries, queriesRowMajor, bitset, k, outDistances, outIndices, useHgemm, ignoreOutDistances); } template <typename T> void runIPDistance(GpuResources* resources, Tensor<T, 2, true>& centroids, bool centroidsRowMajor, Tensor<T, 2, true>& queries, bool queriesRowMajor, Tensor<uint8_t, 1, true>& bitset, int k, Tensor<T, 2, true>& outDistances, Tensor<int, 2, true>& outIndices, bool useHgemm) { runDistance<T>(false, // IP resources, centroids, centroidsRowMajor, nullptr, // no centroid norms provided queries, queriesRowMajor, bitset, k, outDistances, outIndices, useHgemm, false); } // // Instantiations of the distance templates // void runIPDistance(GpuResources* resources, Tensor<float, 2, true>& vectors, bool vectorsRowMajor, Tensor<float, 2, true>& queries, bool queriesRowMajor, Tensor<uint8_t, 1, true>& bitset, int k, Tensor<float, 2, true>& outDistances, Tensor<int, 2, true>& outIndices) { runIPDistance<float>(resources, vectors, vectorsRowMajor, queries, queriesRowMajor, bitset, k, outDistances, outIndices, false); } void runIPDistance(GpuResources* resources, Tensor<half, 2, true>& vectors, bool vectorsRowMajor, Tensor<half, 2, true>& queries, bool queriesRowMajor, Tensor<uint8_t, 1, true>& bitset, int k, Tensor<half, 2, true>& outDistances, Tensor<int, 2, true>& outIndices, bool useHgemm) { runIPDistance<half>(resources, vectors, vectorsRowMajor, queries, queriesRowMajor, bitset, k, outDistances, outIndices, useHgemm); } void runL2Distance(GpuResources* resources, Tensor<float, 2, true>& vectors, bool vectorsRowMajor, Tensor<float, 1, true>* vectorNorms, Tensor<float, 2, true>& queries, bool queriesRowMajor, Tensor<uint8_t, 1, true>& bitset, int k, Tensor<float, 2, true>& outDistances, Tensor<int, 2, true>& outIndices, bool ignoreOutDistances) { runL2Distance<float>(resources, vectors, vectorsRowMajor, vectorNorms, queries, queriesRowMajor, bitset, k, outDistances, outIndices, false, ignoreOutDistances); } void runL2Distance(GpuResources* resources, Tensor<half, 2, true>& vectors, bool vectorsRowMajor, Tensor<half, 1, true>* vectorNorms, Tensor<half, 2, true>& queries, bool queriesRowMajor, Tensor<uint8_t, 1, true>& bitset, int k, Tensor<half, 2, true>& outDistances, Tensor<int, 2, true>& outIndices, bool useHgemm, bool ignoreOutDistances) { runL2Distance<half>(resources, vectors, vectorsRowMajor, vectorNorms, queries, queriesRowMajor, bitset, k, outDistances, outIndices, useHgemm, ignoreOutDistances); } } } // namespace
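The L2 path above relies on the decomposition ||q - c||^2 = ||q||^2 - 2<q,c> + ||c||^2: the batched GEMM with alpha = -2.0f produces the middle term for a whole tile at once, and the two norm terms are added back by runSumAlongRows and the fused runL2SelectMin kernel. The snippet below is an illustrative CPU reference of that identity only; it is not FAISS code and the function name is an invention for the example.

// Illustrative CPU reference (not FAISS code) for the identity used above.
#include <vector>
#include <cstddef>

float l2ViaDecomposition(const std::vector<float>& q, const std::vector<float>& c)
{
    float qq = 0.f, cc = 0.f, qc = 0.f;
    for (std::size_t d = 0; d < q.size(); ++d) {
        qq += q[d] * q[d];   // ||q||^2 (runL2Norm over the queries)
        cc += c[d] * c[d];   // ||c||^2 (precomputed centroid norms)
        qc += q[d] * c[d];   // inner product; the GEMM computes -2 * qc in bulk
    }
    return qq - 2.f * qc + cc;   // equals the directly computed squared L2 distance
}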
b52ce0820843ff724a473333b8fd6f164f44955f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Voxel sampling GPU implementation * Author Zhaoyu SU * All Rights Reserved. Sep., 2019. */ #include <stdio.h> #include <vector> __device__ inline int get_batch_id(int* accu_list, int batch_size, int id) { for (int b=0; b<batch_size-1; b++) { if (id >= accu_list[b]) { if(id < accu_list[b+1]) return b; } } return batch_size - 1; } __global__ void grid_sampling_gpu_kernel(int batch_size, int input_point_num, float res_w, float res_l, float res_h, int grid_w, int grid_l, int grid_h, const float* input_coors, const int* input_num_list, int* input_accu_list, int* output_idx_temp, int* output_num_list, int* grid_buffer) { if (batch_size*input_point_num <=0) { printf("GridSamplingOp exits due to void inputs.\n"); return; } int grid_size = grid_w * grid_l * grid_h; int point_id = threadIdx.x + blockIdx.x * blockDim.x; if (point_id < input_point_num) { int batch_id = get_batch_id(input_accu_list, batch_size, point_id); int grid_coor_w = __float2int_rz(input_coors[point_id*3 + 0] / res_w); int grid_coor_l = __float2int_rz(input_coors[point_id*3 + 1] / res_l); int grid_coor_h = __float2int_rz(input_coors[point_id*3 + 2] / res_h); // printf("[%d, %d, %d]\n", grid_coor_w, grid_coor_l, grid_coor_h); int grid_buffer_idx = batch_id * grid_size + grid_coor_w * grid_l * grid_h + grid_coor_l * grid_h + grid_coor_h; int ret = atomicAdd(&grid_buffer[grid_buffer_idx], 1); if (ret == 0) { int count = atomicAdd(&output_num_list[batch_id], 1); output_idx_temp[input_accu_list[batch_id] + count] = point_id; } } } void grid_sampling_gpu_launcher(int batch_size, int input_point_num, std::vector<float> resolution, std::vector<int> grid_dims, const float* input_coors, const int* input_num_list, int* input_accu_list, int* output_idx_temp, int* output_num_list, int* grid_buffer) { int blockSize; // The launch configurator returned block size int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch int gridSize; // The actual grid size needed, based on input size hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, grid_sampling_gpu_kernel, 0, input_point_num); // Round up according to array size gridSize = (input_point_num + blockSize - 1) / blockSize; // printf("***************Here******************\n"); // printf("batch_size=%d, input_point_num=%d, resolution=%f, grid_w=%d, grid_l=%d, grid_h=%d\n", batch_size, input_point_num, resolution, grid_w, grid_l, grid_h); // printf("grid_size=%d, blockSize=%d\n", gridSize, blockSize); hipLaunchKernelGGL(( grid_sampling_gpu_kernel), dim3(gridSize), dim3(blockSize), 0, 0, batch_size, input_point_num, resolution[0], resolution[1], resolution[2], grid_dims[0], grid_dims[1], grid_dims[2], input_coors, input_num_list, input_accu_list, output_idx_temp, output_num_list, grid_buffer); }
b52ce0820843ff724a473333b8fd6f164f44955f.cu
/* Voxel sampling GPU implementation * Author Zhaoyu SU * All Rights Reserved. Sep., 2019. */ #include <stdio.h> #include <vector> __device__ inline int get_batch_id(int* accu_list, int batch_size, int id) { for (int b=0; b<batch_size-1; b++) { if (id >= accu_list[b]) { if(id < accu_list[b+1]) return b; } } return batch_size - 1; } __global__ void grid_sampling_gpu_kernel(int batch_size, int input_point_num, float res_w, float res_l, float res_h, int grid_w, int grid_l, int grid_h, const float* input_coors, const int* input_num_list, int* input_accu_list, int* output_idx_temp, int* output_num_list, int* grid_buffer) { if (batch_size*input_point_num <=0) { printf("GridSamplingOp exits due to void inputs.\n"); return; } int grid_size = grid_w * grid_l * grid_h; int point_id = threadIdx.x + blockIdx.x * blockDim.x; if (point_id < input_point_num) { int batch_id = get_batch_id(input_accu_list, batch_size, point_id); int grid_coor_w = __float2int_rz(input_coors[point_id*3 + 0] / res_w); int grid_coor_l = __float2int_rz(input_coors[point_id*3 + 1] / res_l); int grid_coor_h = __float2int_rz(input_coors[point_id*3 + 2] / res_h); // printf("[%d, %d, %d]\n", grid_coor_w, grid_coor_l, grid_coor_h); int grid_buffer_idx = batch_id * grid_size + grid_coor_w * grid_l * grid_h + grid_coor_l * grid_h + grid_coor_h; int ret = atomicAdd(&grid_buffer[grid_buffer_idx], 1); if (ret == 0) { int count = atomicAdd(&output_num_list[batch_id], 1); output_idx_temp[input_accu_list[batch_id] + count] = point_id; } } } void grid_sampling_gpu_launcher(int batch_size, int input_point_num, std::vector<float> resolution, std::vector<int> grid_dims, const float* input_coors, const int* input_num_list, int* input_accu_list, int* output_idx_temp, int* output_num_list, int* grid_buffer) { int blockSize; // The launch configurator returned block size int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch int gridSize; // The actual grid size needed, based on input size cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, grid_sampling_gpu_kernel, 0, input_point_num); // Round up according to array size gridSize = (input_point_num + blockSize - 1) / blockSize; // printf("***************Here******************\n"); // printf("batch_size=%d, input_point_num=%d, resolution=%f, grid_w=%d, grid_l=%d, grid_h=%d\n", batch_size, input_point_num, resolution, grid_w, grid_l, grid_h); // printf("grid_size=%d, blockSize=%d\n", gridSize, blockSize); grid_sampling_gpu_kernel<<<gridSize, blockSize>>>(batch_size, input_point_num, resolution[0], resolution[1], resolution[2], grid_dims[0], grid_dims[1], grid_dims[2], input_coors, input_num_list, input_accu_list, output_idx_temp, output_num_list, grid_buffer); }
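The launcher above expects some host-side preparation that the file does not show: input_accu_list must hold the exclusive prefix sum of input_num_list (that is what get_batch_id walks), and grid_buffer and output_num_list must be zeroed so the atomicAdd-based "first point in a voxel wins" test starts from a clean state. The sketch below is a hypothetical wrapper illustrating that setup for the CUDA copy; the buffer names are assumptions, the launcher's declaration is assumed to be visible, and error checking is omitted.

// Hypothetical host-side preparation before calling grid_sampling_gpu_launcher.
#include <cuda_runtime.h>
#include <vector>

void prepare_and_run(int batch_size, int input_point_num,
                     std::vector<float> resolution, std::vector<int> grid_dims,
                     const float* d_coors, const int* d_num_list, int* d_accu_list,
                     int* d_out_idx, int* d_out_num, int* d_grid_buffer,
                     const int* h_num_list)
{
    // Exclusive prefix sum of the per-batch point counts, built on the host.
    std::vector<int> h_accu(batch_size, 0);
    for (int b = 1; b < batch_size; ++b)
        h_accu[b] = h_accu[b - 1] + h_num_list[b - 1];
    cudaMemcpy(d_accu_list, h_accu.data(), batch_size * sizeof(int), cudaMemcpyHostToDevice);

    // Zero the per-voxel occupancy counters and the per-batch output counters.
    size_t grid_cells = (size_t)batch_size * grid_dims[0] * grid_dims[1] * grid_dims[2];
    cudaMemset(d_grid_buffer, 0, grid_cells * sizeof(int));
    cudaMemset(d_out_num, 0, batch_size * sizeof(int));

    grid_sampling_gpu_launcher(batch_size, input_point_num, resolution, grid_dims,
                               d_coors, d_num_list, d_accu_list,
                               d_out_idx, d_out_num, d_grid_buffer);
}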
6ba84577f2a271768eb43c0d94605dbdd7b6e584.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // Elementary(for vectors less than elementary size) in-shared memory // combined radix-2 + radix-4 Fast Walsh Transform #define ELEMENTARY_LOG2SIZE 11 __global__ void fwtBatch1Kernel( float *__restrict__ d_Output, const float *__restrict__ d_Input, int log2N) { // Handle to thread block group const int N = 1 << log2N; const int base = blockIdx.x << log2N; //(2 ** 11) * 4 bytes == 8KB -- maximum s_data[] size for G80 extern __shared__ float s_data[]; const float *d_Src = d_Input + base; float *d_Dst = d_Output + base; for (int pos = threadIdx.x; pos < N; pos += blockDim.x) { s_data[pos] = d_Src[pos]; } //Main radix-4 stages const int pos = threadIdx.x; for (int stride = N >> 2; stride > 0; stride >>= 2) { int lo = pos & (stride - 1); int i0 = ((pos - lo) << 2) + lo; int i1 = i0 + stride; int i2 = i1 + stride; int i3 = i2 + stride; __syncthreads(); float D0 = s_data[i0]; float D1 = s_data[i1]; float D2 = s_data[i2]; float D3 = s_data[i3]; float T; T = D0; D0 = D0 + D2; D2 = T - D2; T = D1; D1 = D1 + D3; D3 = T - D3; T = D0; s_data[i0] = D0 + D1; s_data[i1] = T - D1; T = D2; s_data[i2] = D2 + D3; s_data[i3] = T - D3; } //Do single radix-2 stage for odd power of two if (log2N & 1) { __syncthreads(); for (int pos = threadIdx.x; pos < N / 2; pos += blockDim.x) { int i0 = pos << 1; int i1 = i0 + 1; float D0 = s_data[i0]; float D1 = s_data[i1]; s_data[i0] = D0 + D1; s_data[i1] = D0 - D1; } } __syncthreads(); for (int pos = threadIdx.x; pos < N; pos += blockDim.x) { d_Dst[pos] = s_data[pos]; } } // Single in-global memory radix-4 Fast Walsh Transform pass // (for strides exceeding elementary vector size) __global__ void fwtBatch2Kernel( float *__restrict__ d_Output, const float *__restrict__ d_Input, int stride) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int N = blockDim.x * gridDim.x * 4; const float *d_Src = d_Input + blockIdx.y * N; float *d_Dst = d_Output + blockIdx.y * N; int lo = pos & (stride - 1); int i0 = ((pos - lo) << 2) + lo; int i1 = i0 + stride; int i2 = i1 + stride; int i3 = i2 + stride; float D0 = d_Src[i0]; float D1 = d_Src[i1]; float D2 = d_Src[i2]; float D3 = d_Src[i3]; float T; T = D0; D0 = D0 + D2; D2 = T - D2; T = D1; D1 = D1 + D3; D3 = T - D3; T = D0; d_Dst[i0] = D0 + D1; d_Dst[i1] = T - D1; T = D2; d_Dst[i2] = D2 + D3; d_Dst[i3] = T - D3; } // Put everything together: batched Fast Walsh Transform CPU front-end void fwtBatchGPU(float *d_Data, int M, int log2N) { const int THREAD_N = 256; int N = 1 << log2N; dim3 grid(N / (4 * THREAD_N), M, 1); for (; log2N > ELEMENTARY_LOG2SIZE; log2N -= 2, N >>= 2, M <<= 2) { hipLaunchKernelGGL(( fwtBatch2Kernel), dim3(grid), dim3(THREAD_N), 0, 0, d_Data, d_Data, N / 4); } hipLaunchKernelGGL(( fwtBatch1Kernel), dim3(M), dim3(N / 4), N *sizeof(float), 0, d_Data, d_Data, log2N ); } // Modulate two arrays __global__ void modulateKernel( float *__restrict__ d_A, const float *__restrict__ d_B, int N) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int numThreads = blockDim.x * gridDim.x; float rcpN = 1.0f / (float)N; for (int pos = tid; pos < N; pos += 
numThreads) { d_A[pos] *= d_B[pos] * rcpN; } } //Interface to modulateKernel() void modulateGPU(float *d_A, float *d_B, int N) { hipLaunchKernelGGL(( modulateKernel), dim3(128), dim3(256), 0, 0, d_A, d_B, N); }
6ba84577f2a271768eb43c0d94605dbdd7b6e584.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // Elementary(for vectors less than elementary size) in-shared memory // combined radix-2 + radix-4 Fast Walsh Transform #define ELEMENTARY_LOG2SIZE 11 __global__ void fwtBatch1Kernel( float *__restrict__ d_Output, const float *__restrict__ d_Input, int log2N) { // Handle to thread block group const int N = 1 << log2N; const int base = blockIdx.x << log2N; //(2 ** 11) * 4 bytes == 8KB -- maximum s_data[] size for G80 extern __shared__ float s_data[]; const float *d_Src = d_Input + base; float *d_Dst = d_Output + base; for (int pos = threadIdx.x; pos < N; pos += blockDim.x) { s_data[pos] = d_Src[pos]; } //Main radix-4 stages const int pos = threadIdx.x; for (int stride = N >> 2; stride > 0; stride >>= 2) { int lo = pos & (stride - 1); int i0 = ((pos - lo) << 2) + lo; int i1 = i0 + stride; int i2 = i1 + stride; int i3 = i2 + stride; __syncthreads(); float D0 = s_data[i0]; float D1 = s_data[i1]; float D2 = s_data[i2]; float D3 = s_data[i3]; float T; T = D0; D0 = D0 + D2; D2 = T - D2; T = D1; D1 = D1 + D3; D3 = T - D3; T = D0; s_data[i0] = D0 + D1; s_data[i1] = T - D1; T = D2; s_data[i2] = D2 + D3; s_data[i3] = T - D3; } //Do single radix-2 stage for odd power of two if (log2N & 1) { __syncthreads(); for (int pos = threadIdx.x; pos < N / 2; pos += blockDim.x) { int i0 = pos << 1; int i1 = i0 + 1; float D0 = s_data[i0]; float D1 = s_data[i1]; s_data[i0] = D0 + D1; s_data[i1] = D0 - D1; } } __syncthreads(); for (int pos = threadIdx.x; pos < N; pos += blockDim.x) { d_Dst[pos] = s_data[pos]; } } // Single in-global memory radix-4 Fast Walsh Transform pass // (for strides exceeding elementary vector size) __global__ void fwtBatch2Kernel( float *__restrict__ d_Output, const float *__restrict__ d_Input, int stride) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int N = blockDim.x * gridDim.x * 4; const float *d_Src = d_Input + blockIdx.y * N; float *d_Dst = d_Output + blockIdx.y * N; int lo = pos & (stride - 1); int i0 = ((pos - lo) << 2) + lo; int i1 = i0 + stride; int i2 = i1 + stride; int i3 = i2 + stride; float D0 = d_Src[i0]; float D1 = d_Src[i1]; float D2 = d_Src[i2]; float D3 = d_Src[i3]; float T; T = D0; D0 = D0 + D2; D2 = T - D2; T = D1; D1 = D1 + D3; D3 = T - D3; T = D0; d_Dst[i0] = D0 + D1; d_Dst[i1] = T - D1; T = D2; d_Dst[i2] = D2 + D3; d_Dst[i3] = T - D3; } // Put everything together: batched Fast Walsh Transform CPU front-end void fwtBatchGPU(float *d_Data, int M, int log2N) { const int THREAD_N = 256; int N = 1 << log2N; dim3 grid(N / (4 * THREAD_N), M, 1); for (; log2N > ELEMENTARY_LOG2SIZE; log2N -= 2, N >>= 2, M <<= 2) { fwtBatch2Kernel<<<grid, THREAD_N>>>(d_Data, d_Data, N / 4); } fwtBatch1Kernel<<<M, N / 4, N *sizeof(float)>>>( d_Data, d_Data, log2N ); } // Modulate two arrays __global__ void modulateKernel( float *__restrict__ d_A, const float *__restrict__ d_B, int N) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int numThreads = blockDim.x * gridDim.x; float rcpN = 1.0f / (float)N; for (int pos = tid; pos < N; pos += numThreads) { d_A[pos] *= d_B[pos] * rcpN; } } //Interface to modulateKernel() void modulateGPU(float *d_A, float *d_B, int N) { modulateKernel<<<128, 
256>>>(d_A, d_B, N); }
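fwtBatchGPU and modulateGPU are the building blocks of dyadic (Walsh-Hadamard-domain) convolution: transform both signals, multiply element-wise, transform back. Because the unnormalised WHT is its own inverse up to a factor of N, and modulateKernel already folds the 1/N scaling into the product, the three transforms below yield the dyadic convolution directly. dyadicConvolution is a hypothetical host routine sketched for illustration, not part of the original sample, and it assumes both declarations are visible and that N is large enough for the sample's kernel configurations.

// Hypothetical dyadic-convolution driver built on the two entry points above.
// d_data and d_kernel each hold N = 2^log2N floats already resident on the device.
void dyadicConvolution(float *d_data, float *d_kernel, int log2N)
{
    const int N = 1 << log2N;
    fwtBatchGPU(d_data,   1, log2N);    // forward WHT of the signal
    fwtBatchGPU(d_kernel, 1, log2N);    // forward WHT of the filter
    modulateGPU(d_data, d_kernel, N);   // pointwise product, scaled by 1/N
    fwtBatchGPU(d_data,   1, log2N);    // same transform again; d_data now holds the result
    cudaDeviceSynchronize();
}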
35175852c1daa7a6359e4f2e036f132a4246f250.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void printSuccessForCorrectExecutionConfiguration()
{
  if(threadIdx.x == 1023 && blockIdx.x == 255)
  {
    printf("Success!\n");
  } else {
    printf("Failure. Update the execution configuration as necessary.\n");
  }
}

int main()
{
  /*
   * Success!
   */
  hipLaunchKernelGGL(( printSuccessForCorrectExecutionConfiguration), dim3(1), dim3(1), 0, 0, );
}

35175852c1daa7a6359e4f2e036f132a4246f250.cu
#include <stdio.h>

__global__ void printSuccessForCorrectExecutionConfiguration()
{
  if(threadIdx.x == 1023 && blockIdx.x == 255)
  {
    printf("Success!\n");
  } else {
    printf("Failure. Update the execution configuration as necessary.\n");
  }
}

int main()
{
  /*
   * Update the execution configuration so that the kernel prints "Success!".
   */
  printSuccessForCorrectExecutionConfiguration<<<1, 1>>>();
}
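The kernel only reports success from thread 1023 of block 255, so the <<<1, 1>>> launch in the exercise can never print it. One configuration that satisfies the check is 256 blocks of 1024 threads; the CUDA form is shown below (the HIP copy would pass dim3(256) and dim3(1024) to hipLaunchKernelGGL), and the synchronisation is there so the device-side printf is flushed before main returns.

// One execution configuration that reaches threadIdx.x == 1023 and blockIdx.x == 255.
printSuccessForCorrectExecutionConfiguration<<<256, 1024>>>();
cudaDeviceSynchronize();   // flush the device-side printf before main returns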
091f060a372cc5e39cad7c730bf52b27619f4d07.hip
// !!! This is a file automatically generated by hipify!!! // Matrix Multiplication in CUDA #include <stdio.h> //#include <string.h> //#include <assert.h> //#include <stdlib.h> #include <hip/hip_runtime.h> // includes, project //////////////////////////////////////////////////////////////////////////////// // declarations, forward #define WIDTH 32 extern "C" void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int); // FILL HERE: define constant variable __constant__ float d_A [WIDTH*WIDTH] ; // MatrixMul kernel /** * CUDA Kernel Device code * * Computes the matrix multiplication of A and B into C. The 3 matrices have the same * number of elements WIDTH*WIDTH. */ // FILL HERE: translate C-version matrixMul to CUDA-version kernel code __global__ void MatrixMul(float* A, float* B, float* C, unsigned long long* runtime) { // TODO : Kernel Function // C = A * B // --> i unsigned long long start_time= clock64(); int tid = threadIdx.x; int row = tid/WIDTH; int col = tid%WIDTH; float cl=0.0; for(int k = 0; k < WIDTH; k++) { cl += d_A[row *WIDTH + k] * B[k*WIDTH+col] ; } C[row*WIDTH+ col] = cl; // <-- unsigned long long stop_time= clock64(); runtime[tid] = (unsigned long long) (stop_time - start_time); } /** * Host main routine */ int main(void) { // Error code to check return values for CUDA calls hipError_t err = hipSuccess; // Print the matrix size to be used, and compute its size int size = WIDTH*WIDTH*sizeof(float); printf("[MatrixMul of %d x %d elements]\n", WIDTH, WIDTH); // Allocate the host input matrix h_A float *h_A = (float *)malloc(size); // Allocate the host input matrix h_B float *h_B = (float *)malloc(size); // Allocate the host input matrix h_C float *h_C = (float *)malloc(size); // Allocate the host matrix for compute check float *reference = (float *)malloc(size); // Verify that allocations succeeded if (h_A == NULL || h_B == NULL || h_C == NULL || reference == NULL) { fprintf(stderr, "Failed to allocate host matrices!\n"); exit(EXIT_FAILURE); } // Initialize the host input matrices for (int i = 0; i < WIDTH; ++i) { for (int j = 0; j < WIDTH; ++j) { h_A[i*WIDTH + j] = 0.01f; h_B[i*WIDTH + j] = 1.0f; } } memset(h_C, 0, size); memset(reference, 0, size); // compute the matrix multiplication on the CPU for comparison computeGold(reference, h_A, h_B, WIDTH, WIDTH, WIDTH); // Allocate device input matrices // TODO : Leave/Remove the given hipMalloc code properly // --> //float* d_A = NULL; //err = hipMalloc((void**)&d_A, size); //if (err != hipSuccess) //{ // fprintf(stderr, "Failed to allocate device matrix A (error code %s)!\n", hipGetErrorString(err)); // exit(EXIT_FAILURE); //} float* d_B = NULL; err = hipMalloc((void**)&d_B, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device matrix B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // <-- // Allocate the device output matrix float* d_C = NULL; err = hipMalloc((void**)&d_C, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device matrix C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the host input matrix A and B in host memory to the device input matrices in // device memory // TODO : Add proper mem copy APIs according to the memory that matrix A and B will be stored // --> printf("Copy input data from the host memory to the CUDA device\n"); err= hipMemcpyToSymbol(d_A, h_A, size);// FILL HERE if (err != hipSuccess) { fprintf(stderr, "Failed to copy matrix A from host to device (error code %s)!\n", 
hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);// FILL HERE //err = ;// FILL HERE if (err != hipSuccess) { fprintf(stderr, "Failed to copy matrix B from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } //int blocksPerGrid =1; // int threadsPerBlock = 1024; // <-- // TODO : Clock Measurements // Add code to return clock cycles from kernel // --> #ifdef TM unsigned long long* d_runtime; int r_size = WIDTH*WIDTH*sizeof(unsigned long long); unsigned long long* runtime = (unsigned long long*)malloc(r_size); memset(runtime, 0, r_size); hipMalloc((void**)&d_runtime, r_size); #endif // <-- // TODO : Kernel Invocation // Assign as many threads as the size of matrix in a thread block and // invoke the kernel function. // --> int blocksPerGrid = 1;// FILL HERE int threadsPerBlock =1024 ;// FILL HERE printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); hipLaunchKernelGGL(( MatrixMul) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, d_runtime); // <-- err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch matrixMul kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } hipDeviceSynchronize(); // Copy the device result matrix in device memory to the host result matrix // in host memory. printf("Copy output data from the CUDA device to the host memory\n"); err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed to copy matrix C from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } hipDeviceSynchronize(); // Verify that the result matrix is correct bool res = 1; for (int i = 0; i < WIDTH*WIDTH; i++) { float diff = fabs(reference[i] - h_C[i]); if(diff > 0.001f) { res = 0; break; } } printf("Test %s\n", (res == 1) ? "PASSED" : "FAILED"); // TODO : Get elapsed clock cycles from device to host // Take the longest time as kernel execution time // --> #ifdef TM hipMemcpy(runtime, d_runtime, r_size, hipMemcpyDeviceToHost); hipDeviceSynchronize(); unsigned long long elapsed_time = 0; for(int i = 0; i < WIDTH*WIDTH; i++) if(elapsed_time < runtime[i]) elapsed_time = runtime[i]; printf("Kernel Execution Time: %llu cycles\n", elapsed_time); #endif // <-- // TODO : Free device global memory // Leave/Remove the given hipFree statement according to your data allocation // --> hipFree(d_B); hipFree(d_C); #ifdef TM hipFree(d_runtime); #endif // <-- // Free host memory free(h_A); free(h_B); free(h_C); free(reference); #ifdef TM free(runtime); #endif return 0; } void computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA, unsigned int wB) { for (unsigned int i = 0; i < hA; ++i) for (unsigned int j = 0; j < wB; ++j) { double sum = 0; for (unsigned int k = 0; k < wA; ++k) { double a = A[i * wA + k]; double b = B[k * wB + j]; sum += a * b; } C[i * wB + j] = (float)sum; } }
091f060a372cc5e39cad7c730bf52b27619f4d07.cu
// Matrix Multiplication in CUDA #include <stdio.h> //#include <string.h> //#include <assert.h> //#include <stdlib.h> #include <cuda_runtime.h> // includes, project //////////////////////////////////////////////////////////////////////////////// // declarations, forward #define WIDTH 32 extern "C" void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int); // FILL HERE: define constant variable __constant__ float d_A [WIDTH*WIDTH] ; // MatrixMul kernel /** * CUDA Kernel Device code * * Computes the matrix multiplication of A and B into C. The 3 matrices have the same * number of elements WIDTH*WIDTH. */ // FILL HERE: translate C-version matrixMul to CUDA-version kernel code __global__ void MatrixMul(float* A, float* B, float* C, unsigned long long* runtime) { // TODO : Kernel Function // C = A * B // --> i unsigned long long start_time= clock64(); int tid = threadIdx.x; int row = tid/WIDTH; int col = tid%WIDTH; float cl=0.0; for(int k = 0; k < WIDTH; k++) { cl += d_A[row *WIDTH + k] * B[k*WIDTH+col] ; } C[row*WIDTH+ col] = cl; // <-- unsigned long long stop_time= clock64(); runtime[tid] = (unsigned long long) (stop_time - start_time); } /** * Host main routine */ int main(void) { // Error code to check return values for CUDA calls cudaError_t err = cudaSuccess; // Print the matrix size to be used, and compute its size int size = WIDTH*WIDTH*sizeof(float); printf("[MatrixMul of %d x %d elements]\n", WIDTH, WIDTH); // Allocate the host input matrix h_A float *h_A = (float *)malloc(size); // Allocate the host input matrix h_B float *h_B = (float *)malloc(size); // Allocate the host input matrix h_C float *h_C = (float *)malloc(size); // Allocate the host matrix for compute check float *reference = (float *)malloc(size); // Verify that allocations succeeded if (h_A == NULL || h_B == NULL || h_C == NULL || reference == NULL) { fprintf(stderr, "Failed to allocate host matrices!\n"); exit(EXIT_FAILURE); } // Initialize the host input matrices for (int i = 0; i < WIDTH; ++i) { for (int j = 0; j < WIDTH; ++j) { h_A[i*WIDTH + j] = 0.01f; h_B[i*WIDTH + j] = 1.0f; } } memset(h_C, 0, size); memset(reference, 0, size); // compute the matrix multiplication on the CPU for comparison computeGold(reference, h_A, h_B, WIDTH, WIDTH, WIDTH); // Allocate device input matrices // TODO : Leave/Remove the given cudaMalloc code properly // --> //float* d_A = NULL; //err = cudaMalloc((void**)&d_A, size); //if (err != cudaSuccess) //{ // fprintf(stderr, "Failed to allocate device matrix A (error code %s)!\n", cudaGetErrorString(err)); // exit(EXIT_FAILURE); //} float* d_B = NULL; err = cudaMalloc((void**)&d_B, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device matrix B (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // <-- // Allocate the device output matrix float* d_C = NULL; err = cudaMalloc((void**)&d_C, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device matrix C (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the host input matrix A and B in host memory to the device input matrices in // device memory // TODO : Add proper mem copy APIs according to the memory that matrix A and B will be stored // --> printf("Copy input data from the host memory to the CUDA device\n"); err= cudaMemcpyToSymbol(d_A, h_A, size);// FILL HERE if (err != cudaSuccess) { fprintf(stderr, "Failed to copy matrix A from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = 
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);// FILL HERE //err = ;// FILL HERE if (err != cudaSuccess) { fprintf(stderr, "Failed to copy matrix B from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } //int blocksPerGrid =1; // int threadsPerBlock = 1024; // <-- // TODO : Clock Measurements // Add code to return clock cycles from kernel // --> #ifdef TM unsigned long long* d_runtime; int r_size = WIDTH*WIDTH*sizeof(unsigned long long); unsigned long long* runtime = (unsigned long long*)malloc(r_size); memset(runtime, 0, r_size); cudaMalloc((void**)&d_runtime, r_size); #endif // <-- // TODO : Kernel Invocation // Assign as many threads as the size of matrix in a thread block and // invoke the kernel function. // --> int blocksPerGrid = 1;// FILL HERE int threadsPerBlock =1024 ;// FILL HERE printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); MatrixMul <<<blocksPerGrid, threadsPerBlock>>> (d_A, d_B, d_C, d_runtime); // <-- err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch matrixMul kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } cudaThreadSynchronize(); // Copy the device result matrix in device memory to the host result matrix // in host memory. printf("Copy output data from the CUDA device to the host memory\n"); err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy matrix C from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } cudaThreadSynchronize(); // Verify that the result matrix is correct bool res = 1; for (int i = 0; i < WIDTH*WIDTH; i++) { float diff = fabs(reference[i] - h_C[i]); if(diff > 0.001f) { res = 0; break; } } printf("Test %s\n", (res == 1) ? "PASSED" : "FAILED"); // TODO : Get elapsed clock cycles from device to host // Take the longest time as kernel execution time // --> #ifdef TM cudaMemcpy(runtime, d_runtime, r_size, cudaMemcpyDeviceToHost); cudaThreadSynchronize(); unsigned long long elapsed_time = 0; for(int i = 0; i < WIDTH*WIDTH; i++) if(elapsed_time < runtime[i]) elapsed_time = runtime[i]; printf("Kernel Execution Time: %llu cycles\n", elapsed_time); #endif // <-- // TODO : Free device global memory // Leave/Remove the given cudaFree statement according to your data allocation // --> cudaFree(d_B); cudaFree(d_C); #ifdef TM cudaFree(d_runtime); #endif // <-- // Free host memory free(h_A); free(h_B); free(h_C); free(reference); #ifdef TM free(runtime); #endif return 0; } void computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA, unsigned int wB) { for (unsigned int i = 0; i < hA; ++i) for (unsigned int j = 0; j < wB; ++j) { double sum = 0; for (unsigned int k = 0; k < wA; ++k) { double a = A[i * wA + k]; double b = B[k * wB + j]; sum += a * b; } C[i * wB + j] = (float)sum; } }
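Two limits make the single-block, constant-memory layout of this exercise work: matrix A must fit in constant memory (commonly 64 KB), and the one launched block must supply WIDTH*WIDTH threads, which caps WIDTH at 32 for a 1024-thread block. The compile-time checks below make those assumptions explicit; they are an illustrative addition, not part of the original exercise.

// Illustrative compile-time checks for the assumptions behind the exercise above.
static_assert(WIDTH * WIDTH * sizeof(float) <= 64 * 1024,
              "matrix A must fit in constant memory (commonly 64 KB)");
static_assert(WIDTH * WIDTH <= 1024,
              "a single thread block must cover every element of C");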
8dc80e4f7ca095be4955cfbcb8f7b5051398327d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common/book.h"

__global__ void kernel(void) {
}

int main(void) {
    hipLaunchKernelGGL(( kernel), dim3(1),dim3(1), 0, 0, );
    printf("Hello, World!\n");
    return 0;
}
8dc80e4f7ca095be4955cfbcb8f7b5051398327d.cu
#include "common/book.h"

__global__ void kernel(void) {
}

int main(void) {
    kernel<<<1,1>>>();
    printf("Hello, World!\n");
    return 0;
}
d67e0ac3ff25a11ef6b785bdd242a5f4f5d16cff.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include "sssp_kernel.cu" #include "findMin.h" #define INF 1073741824 // 1024*1024*1024 #define QMAXLENGTH 10240000 extern unsigned int solution; extern unsigned int device_num; inline void cudaCheckError(int line, hipError_t ce) { if (ce != hipSuccess){ printf("Error: line %d %s\n", line, hipGetErrorString(ce)); exit(1); } } extern double gettime(); void SSSP_GPU( int *vertexArray, int *edgeArray, int *weightArray, int *costArray, char *frontier, int nodeNumber, int edgeNumber, int source) { int noPerBlock = nodeNumber; int minValue = 0; int *d_vertexArray; int *d_costArray; int *d_edgeArray; int *d_weightArray; int *d_workQueue; int *d_workQueue_1; int *d_bufferBlock_1024_1024; int *d_bufferBlock_1024; int *d_minValue; char *d_frontier; char *d_update; char *commit = new char [nodeNumber](); char *update = new char [nodeNumber](); char *d_commit; unsigned int *d_qCounter; unsigned int *d_qLength; unsigned int *d_qLength_1; dim3 dimGrid(1,1,1); // thread+bitmap dim3 dimBlock(1,1,1); int maxDegreeB = 32; dim3 dimBGrid(1,1,1); // block+bitmap dim3 dimBBlock(maxDegreeB,1,1); int maxDegreeT = 192; // thread/block, thread+queue dim3 dimGridT(1,1,1); dim3 dimBlockT(maxDegreeT,1,1); dim3 dimGridB(1,1,1); // block+queue dim3 dimBlockB(maxDegreeB,1,1); unsigned int qMaxLength = QMAXLENGTH; int *workQueue = new int [qMaxLength]; unsigned int qLength = 0; unsigned int qCounter = 0; unsigned int qMaxLength_1 = QMAXLENGTH / 5; int *workQueue_1 = new int [qMaxLength_1]; unsigned int qLength_1 = 0; double time, end_time; time = gettime(); cudaCheckError( __LINE__, hipSetDevice(device_num) ); printf("Choose CUDA device: %d\n", device_num); end_time = gettime(); printf("hipSetDevice:\t\t%lf\n",end_time-time); if ( noPerBlock > maxDegreeT ){ dimGrid.x = nodeNumber / maxDegreeT + 1; dimBlock.x = maxDegreeT; } else { dimGrid.x = 1; dimBlock.x = noPerBlock; } /* Configuration for block+bitmap */ if ( nodeNumber > MAXDIMGRID ){ dimBGrid.x = MAXDIMGRID; dimBGrid.y = nodeNumber / MAXDIMGRID + 1; } else { dimBGrid.x = nodeNumber; } /* initialization */ for (int i=0; i<nodeNumber; i++ ) { costArray[i] = INF; } update[source] = 1; costArray[source] = 0; //printf("Active number in queue:%d\n", qLength); /* Allocate GPU memory */ time = gettime(); cudaCheckError( __LINE__, hipMalloc( (void**)&d_vertexArray, sizeof(int)*(nodeNumber+1) ) ); cudaCheckError( __LINE__, hipMalloc( (void**)&d_costArray, sizeof(int)*nodeNumber ) ); cudaCheckError( __LINE__, hipMalloc( (void**)&d_edgeArray, sizeof(int)*edgeNumber ) ); cudaCheckError( __LINE__, hipMalloc( (void**)&d_weightArray, sizeof(int)*edgeNumber ) ); cudaCheckError( __LINE__, hipMalloc( (void**)&d_frontier, sizeof(char)*nodeNumber ) ); cudaCheckError( __LINE__, hipMalloc( (void**)&d_update, sizeof(char)*nodeNumber ) ); cudaCheckError( __LINE__, hipMalloc( (void**)&d_commit, sizeof(char)*nodeNumber ) ); cudaCheckError( __LINE__, hipMalloc( (void**)&d_workQueue, sizeof(int)*qMaxLength) ); cudaCheckError( __LINE__, hipMalloc( (void**)&d_qCounter, sizeof(unsigned int) ) ); cudaCheckError( __LINE__, hipMalloc( (void**)&d_qLength, sizeof(unsigned int) ) ); if ( solution<4 ) { cudaCheckError( __LINE__, hipMalloc( (void**)&d_bufferBlock_1024, sizeof(int)*1024 ) ); cudaCheckError( __LINE__, hipMalloc( (void**)&d_bufferBlock_1024_1024, sizeof(int)*1024*1024 ) ); } if ( solution==9 ) { cudaCheckError( __LINE__, hipMalloc( (void**)&d_workQueue_1, sizeof(int)*qMaxLength_1) ); 
cudaCheckError( __LINE__, hipMalloc( (void**)&d_qLength_1, sizeof(unsigned int) ) ); } cudaCheckError( __LINE__, hipMalloc( (void**)&d_minValue, sizeof(int)) ); end_time = gettime(); printf("hipMalloc:\t\t%lf\n",end_time-time); time = gettime(); cudaCheckError( __LINE__, hipMemcpy( d_vertexArray, vertexArray, sizeof(int)*(nodeNumber+1), hipMemcpyHostToDevice) ); cudaCheckError( __LINE__, hipMemcpy( d_edgeArray, edgeArray, sizeof(int)*edgeNumber, hipMemcpyHostToDevice) ); cudaCheckError( __LINE__, hipMemcpy( d_costArray, costArray, sizeof(int)*nodeNumber,hipMemcpyHostToDevice) ); cudaCheckError( __LINE__, hipMemcpy( d_weightArray, weightArray, sizeof(int)*edgeNumber, hipMemcpyHostToDevice) ); cudaCheckError( __LINE__, hipMemcpy( d_update, update, sizeof(char)*nodeNumber, hipMemcpyHostToDevice) ); cudaCheckError( __LINE__, hipMemcpy( d_qCounter, &qCounter, sizeof(unsigned int), hipMemcpyHostToDevice) ); cudaCheckError( __LINE__, hipMemcpy( d_qLength, &qLength, sizeof(unsigned int), hipMemcpyHostToDevice) ); cudaCheckError( __LINE__, hipMemcpy( d_minValue, &minValue, sizeof(int), hipMemcpyHostToDevice) ); cudaCheckError( __LINE__, hipMemset(d_commit, 0, sizeof(char)*nodeNumber) ); if ( solution==9 ) { cudaCheckError( __LINE__, hipMemcpy( d_qLength_1, &qLength_1, sizeof(unsigned int), hipMemcpyHostToDevice) ); } end_time = gettime(); printf("hipMemcpy:\t\t%lf\n",end_time-time); time = gettime(); int iteration = 0; printf("Solution is %d\n", solution); /* Initialize working set */ switch ( solution ){ case 0: case 2: break; case 1: case 3: cudaCheckError( __LINE__, hipMemset(d_qCounter, 0, sizeof(unsigned int))); hipLaunchKernelGGL(( order_generateQueue_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_costArray, d_workQueue, d_commit, d_qCounter, qMaxLength, nodeNumber, d_minValue); break; case 4: case 6: hipLaunchKernelGGL(( generateBitmap_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_frontier, d_update, nodeNumber); break; case 5: case 7: case 8: hipLaunchKernelGGL(( unorder_generateQueue_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_update, nodeNumber, d_workQueue, d_qLength, qMaxLength); cudaCheckError( __LINE__, hipMemcpy(&qLength,d_qLength,sizeof(unsigned int), hipMemcpyDeviceToHost)); break; case 9: hipLaunchKernelGGL(( unorder_gen_multiQueue_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_vertexArray, d_update, nodeNumber, d_workQueue, d_qLength, qMaxLength, d_workQueue_1, d_qLength_1, qMaxLength_1); break; default: break; } do { //if (iteration==3) break; switch ( solution ){ case 0: // order+thread+bitmap //printf("Thread+Bitmap\n"); hipLaunchKernelGGL(( O_T_B_commit_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_vertexArray, d_costArray, d_edgeArray, d_weightArray, d_commit, d_update, nodeNumber, edgeNumber, d_minValue); break; case 1: // order+thread+queue //printf("Thread+Queue\n"); cudaCheckError( __LINE__, hipMemset(d_qCounter, 0, sizeof(unsigned int))); hipLaunchKernelGGL(( order_generateQueue_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_costArray, d_workQueue, d_commit, d_qCounter, qMaxLength, nodeNumber, d_minValue); hipLaunchKernelGGL(( O_T_Q_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_vertexArray, d_costArray, d_edgeArray, d_weightArray, d_update, d_workQueue, d_qCounter, qMaxLength); break; case 2: // order+block+bitmap //printf("Block+Bitmap\n"); hipLaunchKernelGGL(( O_B_B_commit_kernel), dim3(dimBGrid), dim3(dimBBlock), 0, 0, d_vertexArray, d_costArray, d_edgeArray, d_weightArray, d_commit, d_update, nodeNumber, edgeNumber, d_minValue); break; case 3: // 
order+block+queue //printf("Block+Queue\n"); cudaCheckError( __LINE__, hipMemset(d_qCounter, 0, sizeof(unsigned int))); hipLaunchKernelGGL(( order_generateQueue_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_costArray, d_workQueue, d_commit, d_qCounter, qMaxLength, nodeNumber, d_minValue); hipLaunchKernelGGL(( O_B_Q_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_vertexArray, d_costArray, d_edgeArray, d_weightArray, d_update, d_workQueue, d_qCounter, qMaxLength); break; case 4: // unorder+thread+bitmap //printf("Thread+Bitmap\n"); hipLaunchKernelGGL(( unorder_threadBitmap_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_vertexArray, d_costArray, d_edgeArray, d_weightArray, d_frontier, d_update, nodeNumber); hipLaunchKernelGGL(( generateBitmap_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_frontier, d_update, nodeNumber); break; case 5: // unorder+thread+queue //printf("Thread+Queue\n"); /* Dynamic kernel configuration */ if (qLength<=maxDegreeT){ dimGridT.x = 1; } else if (qLength<=maxDegreeT*MAXDIMGRID){ dimGridT.x = qLength/maxDegreeT+1; } else{ printf("Too many elements in queue\n"); exit(0); } hipLaunchKernelGGL(( unorder_threadQueue_kernel), dim3(dimGridT), dim3(dimBlockT), 0, 0, d_vertexArray, d_edgeArray, d_costArray, d_weightArray, d_update, nodeNumber, d_workQueue, d_qLength); cudaCheckError( __LINE__, hipMemset(d_qLength, 0, sizeof(unsigned int))); hipLaunchKernelGGL(( unorder_generateQueue_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_update, nodeNumber, d_workQueue, d_qLength, qMaxLength); break; case 6: // unorder+block+bitmap //printf("Block+Bitmap\n"); hipLaunchKernelGGL(( unorder_blockBitmap_kernel), dim3(dimBGrid), dim3(dimBBlock), 0, 0, d_vertexArray, d_costArray, d_edgeArray, d_weightArray, d_frontier, d_update, nodeNumber); cudaCheckError( __LINE__, hipMemset(d_frontier, 0, sizeof(char)*nodeNumber) ); hipLaunchKernelGGL(( generateBitmap_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_frontier, d_update, nodeNumber); break; case 7: // unorder+block+queue //printf("Block+Queue\n"); /* Dynamic kernel configuration */ if (qLength<=MAXDIMGRID){ dimGridB.x = qLength; } else if (qLength<=MAXDIMGRID*1024){ dimGridB.x = MAXDIMGRID; dimGridB.y = qLength/MAXDIMGRID+1; } else{ printf("Too many elements in queue\n"); exit(0); } hipLaunchKernelGGL(( unorder_blockQueue_kernel), dim3(dimGridB), dim3(dimBlockB), 0, 0, d_vertexArray, d_edgeArray, d_costArray, d_weightArray, d_update, nodeNumber, d_workQueue, d_qLength); cudaCheckError( __LINE__, hipMemset(d_qLength, 0, sizeof(unsigned int))); hipLaunchKernelGGL(( unorder_generateQueue_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_update, nodeNumber, d_workQueue, d_qLength, qMaxLength); break; case 8: // unordered + thread mapping + queue + delayed buffer //printf("Thread+Queue\n"); /* Dynamic kernel configuration */ if (qLength<=maxDegreeT){ dimGridT.x = 1; } else if (qLength<=maxDegreeT*MAXDIMGRID){ dimGridT.x = qLength/maxDegreeT+1; } else{ printf("Too many elements in queue\n"); exit(0); } hipLaunchKernelGGL(( unorder_threadQueue_lb_kernel), dim3(dimGridT), dim3(dimBlockT), 0, 0, d_vertexArray, d_edgeArray, d_costArray, d_weightArray, d_update, nodeNumber, d_workQueue, d_qLength); cudaCheckError( __LINE__, hipMemset(d_qLength, 0, sizeof(unsigned int))); hipLaunchKernelGGL(( unorder_generateQueue_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_update, nodeNumber, d_workQueue, d_qLength, qMaxLength); break; case 9: // unordered + thread mapping + priority queue //printf("Thread+Queue\n"); /* Dynamic kernel configuration for thread 
mapping */ if (qLength<=maxDegreeT){ dimGridT.x = 1; } else if (qLength<=maxDegreeT*MAXDIMGRID){ dimGridT.x = qLength/maxDegreeT+1; } else{ printf("Too many elements in queue\n"); exit(0); } hipLaunchKernelGGL(( unorder_threadQueue_kernel), dim3(dimGridT), dim3(dimBlockT), 0, 0, d_vertexArray, d_edgeArray, d_costArray, d_weightArray, d_update, nodeNumber, d_workQueue, d_qLength); /* Dynamic kernel configuration for thread mapping */ if (qLength_1<=MAXDIMGRID){ dimGridB.x = qLength_1; } else if (qLength_1<=MAXDIMGRID*1024){ dimGridB.x = MAXDIMGRID; dimGridB.y = qLength_1/MAXDIMGRID+1; } else{ printf("Too many elements in queue\n"); exit(0); } hipLaunchKernelGGL(( unorder_blockQueue_kernel), dim3(dimGridB), dim3(dimBlockB), 0, 0, d_vertexArray, d_edgeArray, d_costArray, d_weightArray, d_update, nodeNumber, d_workQueue_1, d_qLength_1); cudaCheckError( __LINE__, hipMemset(d_qLength, 0, sizeof(unsigned int))); cudaCheckError( __LINE__, hipMemset(d_qLength_1, 0, sizeof(unsigned int))); hipLaunchKernelGGL(( unorder_gen_multiQueue_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_vertexArray, d_update, nodeNumber, d_workQueue, d_qLength, qMaxLength, d_workQueue_1, d_qLength_1, qMaxLength_1); break; case 10:// unorder+thread queue+dynamic parallelism //printf("Thread+Queue\n"); /* Dynamic kernel configuration */ if (qLength<=maxDegreeT){ dimGridT.x = 1; } else if (qLength<=maxDegreeT*MAXDIMGRID){ dimGridT.x = qLength/maxDegreeT+1; } else{ printf("Too many elements in queue\n"); exit(0); } hipLaunchKernelGGL(( unorder_threadQueue_dp_kernel), dim3(dimGridT), dim3(dimBlockT), 0, 0, d_vertexArray, d_edgeArray, d_costArray, d_weightArray, d_update, nodeNumber, d_workQueue, d_qLength); cudaCheckError( __LINE__, hipMemset(d_qLength, 0, sizeof(unsigned int))); hipLaunchKernelGGL(( unorder_generateQueue_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_update, nodeNumber, d_workQueue, d_qLength, qMaxLength); break; default: break; } if ( solution<4 ){ // order findMin(d_costArray, d_commit, d_bufferBlock_1024, d_bufferBlock_1024_1024, d_minValue, nodeNumber); cudaCheckError( __LINE__, hipMemcpy( &minValue, d_minValue, sizeof(int), hipMemcpyDeviceToHost ) ); //printf("min value is :%d\n", minValue); if ( minValue>=INF ) break; } else { // unorder if ( solution==4 || solution==6 ){ // bitmap cudaCheckError( __LINE__, hipMemset(d_qLength, 0, sizeof(unsigned int)) ); hipLaunchKernelGGL(( countWorkingset_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_update, d_qLength, qMaxLength, nodeNumber); cudaCheckError( __LINE__, hipMemset(d_update, 0, sizeof(char)*nodeNumber) ); } cudaCheckError( __LINE__, hipMemcpy(&qLength,d_qLength,sizeof(unsigned int), hipMemcpyDeviceToHost)); if ( solution==9 ) { cudaCheckError( __LINE__, hipMemcpy(&qLength_1,d_qLength_1,sizeof(unsigned int), hipMemcpyDeviceToHost)); qLength = qLength + qLength_1; } //printf("Working set size is %d\n", qLength); if (qLength==0) break; } iteration++; }while(1); cudaCheckError( __LINE__, hipMemcpy( costArray, d_costArray, sizeof(int)*nodeNumber, hipMemcpyDeviceToHost) ); end_time = gettime(); printf("SSSP iteration:\t\t%lf\n",end_time-time); printf("SSSP terminated in %d iterations\n", iteration); hipFree(d_vertexArray); hipFree(d_costArray); hipFree(d_edgeArray); hipFree(d_weightArray); }
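// A minimal cleanup sketch, assuming the buffer names used in SSSP_GPU above; it is not
// part of the original file. Only d_vertexArray, d_costArray, d_edgeArray and d_weightArray
// are freed before returning, so the remaining device buffers and the new[]-allocated host
// arrays could be released just before the closing brace along these lines:
//   hipFree(d_frontier);  hipFree(d_update);   hipFree(d_commit);
//   hipFree(d_workQueue); hipFree(d_qCounter); hipFree(d_qLength);
//   if (solution < 4) { hipFree(d_bufferBlock_1024); hipFree(d_bufferBlock_1024_1024); }
//   if (solution == 9) { hipFree(d_workQueue_1); hipFree(d_qLength_1); }
//   hipFree(d_minValue);
//   delete [] commit; delete [] update; delete [] workQueue; delete [] workQueue_1;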
d67e0ac3ff25a11ef6b785bdd242a5f4f5d16cff.cu
#include <stdio.h> #include <cuda.h> #include "sssp_kernel.cu" #include "findMin.h" #define INF 1073741824 // 1024*1024*1024 #define QMAXLENGTH 10240000 extern unsigned int solution; extern unsigned int device_num; inline void cudaCheckError(int line, cudaError_t ce) { if (ce != cudaSuccess){ printf("Error: line %d %s\n", line, cudaGetErrorString(ce)); exit(1); } } extern double gettime(); void SSSP_GPU( int *vertexArray, int *edgeArray, int *weightArray, int *costArray, char *frontier, int nodeNumber, int edgeNumber, int source) { int noPerBlock = nodeNumber; int minValue = 0; int *d_vertexArray; int *d_costArray; int *d_edgeArray; int *d_weightArray; int *d_workQueue; int *d_workQueue_1; int *d_bufferBlock_1024_1024; int *d_bufferBlock_1024; int *d_minValue; char *d_frontier; char *d_update; char *commit = new char [nodeNumber](); char *update = new char [nodeNumber](); char *d_commit; unsigned int *d_qCounter; unsigned int *d_qLength; unsigned int *d_qLength_1; dim3 dimGrid(1,1,1); // thread+bitmap dim3 dimBlock(1,1,1); int maxDegreeB = 32; dim3 dimBGrid(1,1,1); // block+bitmap dim3 dimBBlock(maxDegreeB,1,1); int maxDegreeT = 192; // thread/block, thread+queue dim3 dimGridT(1,1,1); dim3 dimBlockT(maxDegreeT,1,1); dim3 dimGridB(1,1,1); // block+queue dim3 dimBlockB(maxDegreeB,1,1); unsigned int qMaxLength = QMAXLENGTH; int *workQueue = new int [qMaxLength]; unsigned int qLength = 0; unsigned int qCounter = 0; unsigned int qMaxLength_1 = QMAXLENGTH / 5; int *workQueue_1 = new int [qMaxLength_1]; unsigned int qLength_1 = 0; double time, end_time; time = gettime(); cudaCheckError( __LINE__, cudaSetDevice(device_num) ); printf("Choose CUDA device: %d\n", device_num); end_time = gettime(); printf("cudaSetDevice:\t\t%lf\n",end_time-time); if ( noPerBlock > maxDegreeT ){ dimGrid.x = nodeNumber / maxDegreeT + 1; dimBlock.x = maxDegreeT; } else { dimGrid.x = 1; dimBlock.x = noPerBlock; } /* Configuration for block+bitmap */ if ( nodeNumber > MAXDIMGRID ){ dimBGrid.x = MAXDIMGRID; dimBGrid.y = nodeNumber / MAXDIMGRID + 1; } else { dimBGrid.x = nodeNumber; } /* initialization */ for (int i=0; i<nodeNumber; i++ ) { costArray[i] = INF; } update[source] = 1; costArray[source] = 0; //printf("Active number in queue:%d\n", qLength); /* Allocate GPU memory */ time = gettime(); cudaCheckError( __LINE__, cudaMalloc( (void**)&d_vertexArray, sizeof(int)*(nodeNumber+1) ) ); cudaCheckError( __LINE__, cudaMalloc( (void**)&d_costArray, sizeof(int)*nodeNumber ) ); cudaCheckError( __LINE__, cudaMalloc( (void**)&d_edgeArray, sizeof(int)*edgeNumber ) ); cudaCheckError( __LINE__, cudaMalloc( (void**)&d_weightArray, sizeof(int)*edgeNumber ) ); cudaCheckError( __LINE__, cudaMalloc( (void**)&d_frontier, sizeof(char)*nodeNumber ) ); cudaCheckError( __LINE__, cudaMalloc( (void**)&d_update, sizeof(char)*nodeNumber ) ); cudaCheckError( __LINE__, cudaMalloc( (void**)&d_commit, sizeof(char)*nodeNumber ) ); cudaCheckError( __LINE__, cudaMalloc( (void**)&d_workQueue, sizeof(int)*qMaxLength) ); cudaCheckError( __LINE__, cudaMalloc( (void**)&d_qCounter, sizeof(unsigned int) ) ); cudaCheckError( __LINE__, cudaMalloc( (void**)&d_qLength, sizeof(unsigned int) ) ); if ( solution<4 ) { cudaCheckError( __LINE__, cudaMalloc( (void**)&d_bufferBlock_1024, sizeof(int)*1024 ) ); cudaCheckError( __LINE__, cudaMalloc( (void**)&d_bufferBlock_1024_1024, sizeof(int)*1024*1024 ) ); } if ( solution==9 ) { cudaCheckError( __LINE__, cudaMalloc( (void**)&d_workQueue_1, sizeof(int)*qMaxLength_1) ); cudaCheckError( __LINE__, cudaMalloc( 
(void**)&d_qLength_1, sizeof(unsigned int) ) ); } cudaCheckError( __LINE__, cudaMalloc( (void**)&d_minValue, sizeof(int)) ); end_time = gettime(); printf("cudaMalloc:\t\t%lf\n",end_time-time); time = gettime(); cudaCheckError( __LINE__, cudaMemcpy( d_vertexArray, vertexArray, sizeof(int)*(nodeNumber+1), cudaMemcpyHostToDevice) ); cudaCheckError( __LINE__, cudaMemcpy( d_edgeArray, edgeArray, sizeof(int)*edgeNumber, cudaMemcpyHostToDevice) ); cudaCheckError( __LINE__, cudaMemcpy( d_costArray, costArray, sizeof(int)*nodeNumber,cudaMemcpyHostToDevice) ); cudaCheckError( __LINE__, cudaMemcpy( d_weightArray, weightArray, sizeof(int)*edgeNumber, cudaMemcpyHostToDevice) ); cudaCheckError( __LINE__, cudaMemcpy( d_update, update, sizeof(char)*nodeNumber, cudaMemcpyHostToDevice) ); cudaCheckError( __LINE__, cudaMemcpy( d_qCounter, &qCounter, sizeof(unsigned int), cudaMemcpyHostToDevice) ); cudaCheckError( __LINE__, cudaMemcpy( d_qLength, &qLength, sizeof(unsigned int), cudaMemcpyHostToDevice) ); cudaCheckError( __LINE__, cudaMemcpy( d_minValue, &minValue, sizeof(int), cudaMemcpyHostToDevice) ); cudaCheckError( __LINE__, cudaMemset(d_commit, 0, sizeof(char)*nodeNumber) ); if ( solution==9 ) { cudaCheckError( __LINE__, cudaMemcpy( d_qLength_1, &qLength_1, sizeof(unsigned int), cudaMemcpyHostToDevice) ); } end_time = gettime(); printf("cudaMemcpy:\t\t%lf\n",end_time-time); time = gettime(); int iteration = 0; printf("Solution is %d\n", solution); /* Initialize working set */ switch ( solution ){ case 0: case 2: break; case 1: case 3: cudaCheckError( __LINE__, cudaMemset(d_qCounter, 0, sizeof(unsigned int))); order_generateQueue_kernel<<<dimGrid, dimBlock>>>( d_costArray, d_workQueue, d_commit, d_qCounter, qMaxLength, nodeNumber, d_minValue); break; case 4: case 6: generateBitmap_kernel<<<dimGrid, dimBlock>>>(d_frontier, d_update, nodeNumber); break; case 5: case 7: case 8: unorder_generateQueue_kernel<<<dimGrid, dimBlock>>>(d_update, nodeNumber, d_workQueue, d_qLength, qMaxLength); cudaCheckError( __LINE__, cudaMemcpy(&qLength,d_qLength,sizeof(unsigned int), cudaMemcpyDeviceToHost)); break; case 9: unorder_gen_multiQueue_kernel<<<dimGrid, dimBlock>>>(d_vertexArray, d_update, nodeNumber, d_workQueue, d_qLength, qMaxLength, d_workQueue_1, d_qLength_1, qMaxLength_1); break; default: break; } do { //if (iteration==3) break; switch ( solution ){ case 0: // order+thread+bitmap //printf("Thread+Bitmap\n"); O_T_B_commit_kernel<<<dimGrid, dimBlock>>>( d_vertexArray, d_costArray, d_edgeArray, d_weightArray, d_commit, d_update, nodeNumber, edgeNumber, d_minValue); break; case 1: // order+thread+queue //printf("Thread+Queue\n"); cudaCheckError( __LINE__, cudaMemset(d_qCounter, 0, sizeof(unsigned int))); order_generateQueue_kernel<<<dimGrid, dimBlock>>>( d_costArray, d_workQueue, d_commit, d_qCounter, qMaxLength, nodeNumber, d_minValue); O_T_Q_kernel<<<dimGrid, dimBlock>>>(d_vertexArray, d_costArray, d_edgeArray, d_weightArray, d_update, d_workQueue, d_qCounter, qMaxLength); break; case 2: // order+block+bitmap //printf("Block+Bitmap\n"); O_B_B_commit_kernel<<<dimBGrid, dimBBlock>>>(d_vertexArray, d_costArray, d_edgeArray, d_weightArray, d_commit, d_update, nodeNumber, edgeNumber, d_minValue); break; case 3: // order+block+queue //printf("Block+Queue\n"); cudaCheckError( __LINE__, cudaMemset(d_qCounter, 0, sizeof(unsigned int))); order_generateQueue_kernel<<<dimGrid, dimBlock>>>( d_costArray, d_workQueue, d_commit, d_qCounter, qMaxLength, nodeNumber, d_minValue); O_B_Q_kernel<<<dimGrid, dimBlock>>>(d_vertexArray, 
d_costArray, d_edgeArray, d_weightArray, d_update, d_workQueue, d_qCounter, qMaxLength); break; case 4: // unorder+thread+bitmap //printf("Thread+Bitmap\n"); unorder_threadBitmap_kernel<<<dimGrid, dimBlock>>>( d_vertexArray, d_costArray, d_edgeArray, d_weightArray, d_frontier, d_update, nodeNumber); generateBitmap_kernel<<<dimGrid, dimBlock>>>(d_frontier, d_update, nodeNumber); break; case 5: // unorder+thread+queue //printf("Thread+Queue\n"); /* Dynamic kernel configuration */ if (qLength<=maxDegreeT){ dimGridT.x = 1; } else if (qLength<=maxDegreeT*MAXDIMGRID){ dimGridT.x = qLength/maxDegreeT+1; } else{ printf("Too many elements in queue\n"); exit(0); } unorder_threadQueue_kernel<<<dimGridT, dimBlockT>>>(d_vertexArray, d_edgeArray, d_costArray, d_weightArray, d_update, nodeNumber, d_workQueue, d_qLength); cudaCheckError( __LINE__, cudaMemset(d_qLength, 0, sizeof(unsigned int))); unorder_generateQueue_kernel<<<dimGrid, dimBlock>>>(d_update, nodeNumber, d_workQueue, d_qLength, qMaxLength); break; case 6: // unorder+block+bitmap //printf("Block+Bitmap\n"); unorder_blockBitmap_kernel<<<dimBGrid, dimBBlock>>>(d_vertexArray, d_costArray, d_edgeArray, d_weightArray, d_frontier, d_update, nodeNumber); cudaCheckError( __LINE__, cudaMemset(d_frontier, 0, sizeof(char)*nodeNumber) ); generateBitmap_kernel<<<dimGrid, dimBlock>>>(d_frontier, d_update, nodeNumber); break; case 7: // unorder+block+queue //printf("Block+Queue\n"); /* Dynamic kernel configuration */ if (qLength<=MAXDIMGRID){ dimGridB.x = qLength; } else if (qLength<=MAXDIMGRID*1024){ dimGridB.x = MAXDIMGRID; dimGridB.y = qLength/MAXDIMGRID+1; } else{ printf("Too many elements in queue\n"); exit(0); } unorder_blockQueue_kernel<<<dimGridB, dimBlockB>>>( d_vertexArray, d_edgeArray, d_costArray, d_weightArray, d_update, nodeNumber, d_workQueue, d_qLength); cudaCheckError( __LINE__, cudaMemset(d_qLength, 0, sizeof(unsigned int))); unorder_generateQueue_kernel<<<dimGrid, dimBlock>>>(d_update, nodeNumber, d_workQueue, d_qLength, qMaxLength); break; case 8: // unordered + thread mapping + queue + delayed buffer //printf("Thread+Queue\n"); /* Dynamic kernel configuration */ if (qLength<=maxDegreeT){ dimGridT.x = 1; } else if (qLength<=maxDegreeT*MAXDIMGRID){ dimGridT.x = qLength/maxDegreeT+1; } else{ printf("Too many elements in queue\n"); exit(0); } unorder_threadQueue_lb_kernel<<<dimGridT, dimBlockT>>>(d_vertexArray, d_edgeArray, d_costArray, d_weightArray, d_update, nodeNumber, d_workQueue, d_qLength); cudaCheckError( __LINE__, cudaMemset(d_qLength, 0, sizeof(unsigned int))); unorder_generateQueue_kernel<<<dimGrid, dimBlock>>>(d_update, nodeNumber, d_workQueue, d_qLength, qMaxLength); break; case 9: // unordered + thread mapping + priority queue //printf("Thread+Queue\n"); /* Dynamic kernel configuration for thread mapping */ if (qLength<=maxDegreeT){ dimGridT.x = 1; } else if (qLength<=maxDegreeT*MAXDIMGRID){ dimGridT.x = qLength/maxDegreeT+1; } else{ printf("Too many elements in queue\n"); exit(0); } unorder_threadQueue_kernel<<<dimGridT, dimBlockT>>>(d_vertexArray, d_edgeArray, d_costArray, d_weightArray, d_update, nodeNumber, d_workQueue, d_qLength); /* Dynamic kernel configuration for thread mapping */ if (qLength_1<=MAXDIMGRID){ dimGridB.x = qLength_1; } else if (qLength_1<=MAXDIMGRID*1024){ dimGridB.x = MAXDIMGRID; dimGridB.y = qLength_1/MAXDIMGRID+1; } else{ printf("Too many elements in queue\n"); exit(0); } unorder_blockQueue_kernel<<<dimGridB, dimBlockB>>>( d_vertexArray, d_edgeArray, d_costArray, d_weightArray, d_update, nodeNumber, 
d_workQueue_1, d_qLength_1); cudaCheckError( __LINE__, cudaMemset(d_qLength, 0, sizeof(unsigned int))); cudaCheckError( __LINE__, cudaMemset(d_qLength_1, 0, sizeof(unsigned int))); unorder_gen_multiQueue_kernel<<<dimGrid, dimBlock>>>(d_vertexArray, d_update, nodeNumber, d_workQueue, d_qLength, qMaxLength, d_workQueue_1, d_qLength_1, qMaxLength_1); break; case 10:// unorder+thread queue+dynamic parallelism //printf("Thread+Queue\n"); /* Dynamic kernel configuration */ if (qLength<=maxDegreeT){ dimGridT.x = 1; } else if (qLength<=maxDegreeT*MAXDIMGRID){ dimGridT.x = qLength/maxDegreeT+1; } else{ printf("Too many elements in queue\n"); exit(0); } unorder_threadQueue_dp_kernel<<<dimGridT, dimBlockT>>>(d_vertexArray, d_edgeArray, d_costArray, d_weightArray, d_update, nodeNumber, d_workQueue, d_qLength); cudaCheckError( __LINE__, cudaMemset(d_qLength, 0, sizeof(unsigned int))); unorder_generateQueue_kernel<<<dimGrid, dimBlock>>>(d_update, nodeNumber, d_workQueue, d_qLength, qMaxLength); break; default: break; } if ( solution<4 ){ // order findMin(d_costArray, d_commit, d_bufferBlock_1024, d_bufferBlock_1024_1024, d_minValue, nodeNumber); cudaCheckError( __LINE__, cudaMemcpy( &minValue, d_minValue, sizeof(int), cudaMemcpyDeviceToHost ) ); //printf("min value is :%d\n", minValue); if ( minValue>=INF ) break; } else { // unorder if ( solution==4 || solution==6 ){ // bitmap cudaCheckError( __LINE__, cudaMemset(d_qLength, 0, sizeof(unsigned int)) ); countWorkingset_kernel<<<dimGrid, dimBlock>>>(d_update, d_qLength, qMaxLength, nodeNumber); cudaCheckError( __LINE__, cudaMemset(d_update, 0, sizeof(char)*nodeNumber) ); } cudaCheckError( __LINE__, cudaMemcpy(&qLength,d_qLength,sizeof(unsigned int), cudaMemcpyDeviceToHost)); if ( solution==9 ) { cudaCheckError( __LINE__, cudaMemcpy(&qLength_1,d_qLength_1,sizeof(unsigned int), cudaMemcpyDeviceToHost)); qLength = qLength + qLength_1; } //printf("Working set size is %d\n", qLength); if (qLength==0) break; } iteration++; }while(1); cudaCheckError( __LINE__, cudaMemcpy( costArray, d_costArray, sizeof(int)*nodeNumber, cudaMemcpyDeviceToHost) ); end_time = gettime(); printf("SSSP iteration:\t\t%lf\n",end_time-time); printf("SSSP terminated in %d iterations\n", iteration); cudaFree(d_vertexArray); cudaFree(d_costArray); cudaFree(d_edgeArray); cudaFree(d_weightArray); }
3546776722d64a97ae2c1763520514cb1f82cd87.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// https://legion.stanford.edu/tutorial/hello_world.html

#include <cstdio>

#include "legion.h"
#include "node_aware_mapper2.hpp"

using namespace Legion;

enum TaskID {
  HELLO_WORLD_ID,
};

__global__ void hello_world_kernel() { printf("Hello World! (from the GPU)\n"); }

void hello_world_gpu_task(const Task *task,
                          const std::vector<PhysicalRegion> &regions,
                          Context ctx, Runtime *runtime) {
  hipLaunchKernelGGL(( hello_world_kernel), dim3(1),dim3(1), 0, 0, );
}

void hello_world_cpu_task(const Task *task,
                          const std::vector<PhysicalRegion> &regions,
                          Context ctx, Runtime *runtime) {
  printf("Hello World! (from the CPU)\n");
}

int main(int argc, char **argv) {
  Runtime::set_top_level_task_id(HELLO_WORLD_ID);

  // seems to prefer whichever task is registered first
  {
    TaskVariantRegistrar registrar(HELLO_WORLD_ID, "hello_world GPU variant");
    registrar.add_constraint(ProcessorConstraint(Processor::TOC_PROC));
    Runtime::preregister_task_variant<hello_world_gpu_task>(registrar, "hello_world task");
  }
  {
    TaskVariantRegistrar registrar(HELLO_WORLD_ID, "hello_world CPU variant");
    registrar.add_constraint(ProcessorConstraint(Processor::LOC_PROC));
    Runtime::preregister_task_variant<hello_world_cpu_task>(registrar, "hello_world task");
  }

  Runtime::add_registration_callback(NodeAwareMapper::mapper_registration);

  return Runtime::start(argc, argv);
}
3546776722d64a97ae2c1763520514cb1f82cd87.cu
// https://legion.stanford.edu/tutorial/hello_world.html

#include <cstdio>

#include "legion.h"
#include "node_aware_mapper2.hpp"

using namespace Legion;

enum TaskID {
  HELLO_WORLD_ID,
};

__global__ void hello_world_kernel() { printf("Hello World! (from the GPU)\n"); }

void hello_world_gpu_task(const Task *task,
                          const std::vector<PhysicalRegion> &regions,
                          Context ctx, Runtime *runtime) {
  hello_world_kernel<<<1,1>>>();
}

void hello_world_cpu_task(const Task *task,
                          const std::vector<PhysicalRegion> &regions,
                          Context ctx, Runtime *runtime) {
  printf("Hello World! (from the CPU)\n");
}

int main(int argc, char **argv) {
  Runtime::set_top_level_task_id(HELLO_WORLD_ID);

  // seems to prefer whichever task is registered first
  {
    TaskVariantRegistrar registrar(HELLO_WORLD_ID, "hello_world GPU variant");
    registrar.add_constraint(ProcessorConstraint(Processor::TOC_PROC));
    Runtime::preregister_task_variant<hello_world_gpu_task>(registrar, "hello_world task");
  }
  {
    TaskVariantRegistrar registrar(HELLO_WORLD_ID, "hello_world CPU variant");
    registrar.add_constraint(ProcessorConstraint(Processor::LOC_PROC));
    Runtime::preregister_task_variant<hello_world_cpu_task>(registrar, "hello_world task");
  }

  Runtime::add_registration_callback(NodeAwareMapper::mapper_registration);

  return Runtime::start(argc, argv);
}
b4a82880d5f7bfc207212e54e88b292c36f92f00.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * Copyright (c) 2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cugraph/detail/utility_wrappers.hpp>

#include <raft/random/rng.cuh>

#include <thrust/sequence.h>

#include <rmm/exec_policy.hpp>

namespace cugraph {
namespace detail {

template <typename value_t>
void uniform_random_fill(rmm::cuda_stream_view const& stream_view,
                         value_t* d_value,
                         size_t size,
                         value_t min_value,
                         value_t max_value,
                         uint64_t seed)
{
  raft::random::Rng rng(seed);
  rng.uniform<value_t, size_t>(d_value, size, min_value, max_value, stream_view.value());
}

template void uniform_random_fill(rmm::cuda_stream_view const& stream_view,
                                  float* d_value,
                                  size_t size,
                                  float min_value,
                                  float max_value,
                                  uint64_t seed);

template void uniform_random_fill(rmm::cuda_stream_view const& stream_view,
                                  double* d_value,
                                  size_t size,
                                  double min_value,
                                  double max_value,
                                  uint64_t seed);

template <typename value_t>
void sequence_fill(rmm::cuda_stream_view const& stream_view,
                   value_t* d_value,
                   size_t size,
                   value_t start_value)
{
  thrust::sequence(rmm::exec_policy(stream_view), d_value, d_value + size, start_value);
}

template void sequence_fill(rmm::cuda_stream_view const& stream_view,
                            int32_t* d_value,
                            size_t size,
                            int32_t start_value);

template void sequence_fill(rmm::cuda_stream_view const& stream_view,
                            int64_t* d_value,
                            size_t size,
                            int64_t start_value);

template <typename vertex_t>
vertex_t compute_maximum_vertex_id(rmm::cuda_stream_view const& stream_view,
                                   rmm::device_uvector<vertex_t> const& d_edgelist_rows,
                                   rmm::device_uvector<vertex_t> const& d_edgelist_cols)
{
  auto edge_first = thrust::make_zip_iterator(
    thrust::make_tuple(d_edgelist_rows.begin(), d_edgelist_cols.begin()));

  return thrust::transform_reduce(
    rmm::exec_policy(stream_view),
    edge_first,
    edge_first + d_edgelist_rows.size(),
    [] __device__(auto e) { return ::max(thrust::get<0>(e), thrust::get<1>(e)); },
    vertex_t{0},
    thrust::maximum<vertex_t>());
}

template int32_t compute_maximum_vertex_id(rmm::cuda_stream_view const& stream_view,
                                           rmm::device_uvector<int32_t> const& d_edgelist_rows,
                                           rmm::device_uvector<int32_t> const& d_edgelist_cols);

template int64_t compute_maximum_vertex_id(rmm::cuda_stream_view const& stream_view,
                                           rmm::device_uvector<int64_t> const& d_edgelist_rows,
                                           rmm::device_uvector<int64_t> const& d_edgelist_cols);

}  // namespace detail
}  // namespace cugraph
b4a82880d5f7bfc207212e54e88b292c36f92f00.cu
/*
 * Copyright (c) 2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cugraph/detail/utility_wrappers.hpp>

#include <raft/random/rng.cuh>

#include <thrust/sequence.h>

#include <rmm/exec_policy.hpp>

namespace cugraph {
namespace detail {

template <typename value_t>
void uniform_random_fill(rmm::cuda_stream_view const& stream_view,
                         value_t* d_value,
                         size_t size,
                         value_t min_value,
                         value_t max_value,
                         uint64_t seed)
{
  raft::random::Rng rng(seed);
  rng.uniform<value_t, size_t>(d_value, size, min_value, max_value, stream_view.value());
}

template void uniform_random_fill(rmm::cuda_stream_view const& stream_view,
                                  float* d_value,
                                  size_t size,
                                  float min_value,
                                  float max_value,
                                  uint64_t seed);

template void uniform_random_fill(rmm::cuda_stream_view const& stream_view,
                                  double* d_value,
                                  size_t size,
                                  double min_value,
                                  double max_value,
                                  uint64_t seed);

template <typename value_t>
void sequence_fill(rmm::cuda_stream_view const& stream_view,
                   value_t* d_value,
                   size_t size,
                   value_t start_value)
{
  thrust::sequence(rmm::exec_policy(stream_view), d_value, d_value + size, start_value);
}

template void sequence_fill(rmm::cuda_stream_view const& stream_view,
                            int32_t* d_value,
                            size_t size,
                            int32_t start_value);

template void sequence_fill(rmm::cuda_stream_view const& stream_view,
                            int64_t* d_value,
                            size_t size,
                            int64_t start_value);

template <typename vertex_t>
vertex_t compute_maximum_vertex_id(rmm::cuda_stream_view const& stream_view,
                                   rmm::device_uvector<vertex_t> const& d_edgelist_rows,
                                   rmm::device_uvector<vertex_t> const& d_edgelist_cols)
{
  auto edge_first = thrust::make_zip_iterator(
    thrust::make_tuple(d_edgelist_rows.begin(), d_edgelist_cols.begin()));

  return thrust::transform_reduce(
    rmm::exec_policy(stream_view),
    edge_first,
    edge_first + d_edgelist_rows.size(),
    [] __device__(auto e) { return std::max(thrust::get<0>(e), thrust::get<1>(e)); },
    vertex_t{0},
    thrust::maximum<vertex_t>());
}

template int32_t compute_maximum_vertex_id(rmm::cuda_stream_view const& stream_view,
                                           rmm::device_uvector<int32_t> const& d_edgelist_rows,
                                           rmm::device_uvector<int32_t> const& d_edgelist_cols);

template int64_t compute_maximum_vertex_id(rmm::cuda_stream_view const& stream_view,
                                           rmm::device_uvector<int64_t> const& d_edgelist_rows,
                                           rmm::device_uvector<int64_t> const& d_edgelist_cols);

}  // namespace detail
}  // namespace cugraph
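// A minimal usage sketch for compute_maximum_vertex_id above (illustrative only; the
// stream and vector names are assumptions, not part of this file):
//   rmm::cuda_stream_view stream;  // default stream
//   rmm::device_uvector<int32_t> srcs(num_edges, stream), dsts(num_edges, stream);
//   /* ... fill srcs/dsts with the edge list ... */
//   auto max_vertex_id = cugraph::detail::compute_maximum_vertex_id(stream, srcs, dsts);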
ip_dynamic.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <sys/time.h>
#include <time.h>
#include <iostream>
#include <cstdlib>
using namespace std;

#define VECNUM 50000
#define VECLEN 1000
#define HALFNUM 25000000

int *inputA, *inputB;
int *devInputA, *devInputB, *devOut, *devOut2;
int *outCPU, *outGPU;

void init()
{
	delete [] outGPU;  // allocated with new[] below, so release with delete[]
	int i, j, idx;
	inputA = new int[VECNUM * VECLEN];
	inputB = new int[VECNUM * VECLEN];

	for(i = 0; i < VECNUM; i++){
		for(j = 0; j < VECLEN; j++){
			idx = i*VECLEN + j;
			if(idx<HALFNUM){
				if(idx%2==0){ //if(idx=even number) =>set value=1
					inputA[idx] =1;
					inputB[idx] =1;
				}
				else{ //if(idx=odd number) =>set value=0
					inputA[idx] =0;
					inputB[idx] =0;
				}
			}
			else{
				inputA[idx] =3;
				inputB[idx] =3;
			}
		}
	}

	outCPU = new int[VECNUM]();
	outGPU = new int[VECNUM]();
}

void initGPU()
{
	int inputSize = sizeof(int)*VECNUM*VECLEN;

	hipMalloc(&devInputA, inputSize);
	hipMalloc(&devInputB, inputSize);
	hipMalloc(&devOut, sizeof(int)*VECNUM/2);
	hipMalloc(&devOut2, sizeof(int)*VECNUM/2);

	hipMemcpy(devInputA, inputA, inputSize, hipMemcpyHostToDevice);
	hipMemcpy(devInputB, inputB, inputSize, hipMemcpyHostToDevice);
}

void innerProductCPU()
{
	int i, j, acc, idx;
	for(i = 0; i < VECNUM; i++){
		acc = 0;
		for(j = 0; j < VECLEN; j++){
			idx = i*VECLEN + j;
			acc += inputA[idx] * inputB[idx];
		}
		outCPU[i] = acc;
	}
}

//GPU dynamic version
__global__ void innerProductGPU_firsthalf(int *A, int *B, int *out)
{
	int y = blockIdx.x;
	int x = threadIdx.x;
	int idx = y * VECLEN + x*2; //just only compute the data that its idx is even number

	__shared__ int tmp[VECLEN];
	tmp[x*2] = A[idx] * B[idx];

	__syncthreads();

	if(x == 0){
		int i, sum = 0;
		for(i = 0; i < VECLEN/2; i++)
			sum += tmp[i*2];
		out[y] = sum;
	}
}

__global__ void innerProductGPU_secondhalf(int *A, int *B, int *out)
{
	int y = blockIdx.x;
	int x = threadIdx.x;
	int idx = y * VECLEN + x;

	__shared__ int tmp[VECLEN];
	tmp[x] = A[idx] * B[idx];

	__syncthreads();

	if(x == 0){
		int i, sum = 0;
		for(i = 0; i < VECLEN; i++)
			sum += tmp[i];
		out[y] = sum;
	}
}

__global__ void innerProductGPU_dynpar(int *A, int *B, int *out, int *A2, int *B2, int *out2)
{
	dim3 threadsPerBlock_firsthalf(VECLEN/2);
	dim3 threadsPerBlock_secondhalf(VECLEN);
	dim3 numBlocks_firsthalf(VECNUM/2);
	dim3 numBlocks_secondhalf(VECNUM/2);

	if(threadIdx.x == 0)
		hipLaunchKernelGGL(( innerProductGPU_firsthalf), dim3(numBlocks_firsthalf),dim3(threadsPerBlock_firsthalf), 0, 0, A,B,out);
	else if (threadIdx.x == 1)
		hipLaunchKernelGGL(( innerProductGPU_secondhalf), dim3(numBlocks_secondhalf),dim3(threadsPerBlock_secondhalf), 0, 0, A2,B2,out2);

	hipDeviceSynchronize();
}

//other function
bool checker(){
	int i;
	for(i = 0; i < VECNUM; i++){
		if(outCPU[i] != outGPU[i]){
			cout << "The element: " << i << " is wrong!\n";
			cout << "outCPU[" << i << "] = " << outCPU[i] << endl;
			cout << "outGPU[" << i << "] = " << outGPU[i] << endl;
			return false;
		}
	}
	return true;
}

int timespec_diff_us(timespec& t1, timespec& t2)
{
	return (t2.tv_sec - t1.tv_sec) * 1e6 + (t2.tv_nsec - t1.tv_nsec) / 1e3;
}

int main(int argc, char** argv)
{
	int outSize = sizeof(int)*VECNUM;

	init();
	initGPU();

	timespec time_begin, time_end;

	//CPU version
	clock_gettime(CLOCK_REALTIME, &time_begin);
	innerProductCPU();
	clock_gettime(CLOCK_REALTIME, &time_end);
//	cout << "CPU time for executing inner-product = " << timespec_diff_us(time_begin, time_end) / 1000 << "ms" << endl;

	// GPU dynamic version
	dim3 threadsPerBlock(2);
	dim3 numBlocks(1);

	clock_gettime(CLOCK_REALTIME, &time_begin);
	hipLaunchKernelGGL(( innerProductGPU_dynpar), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, devInputA, devInputB, devOut, devInputA+HALFNUM, devInputB+HALFNUM, devOut2);
	hipDeviceSynchronize();
	clock_gettime(CLOCK_REALTIME, &time_end);
	cout << "GPU time for executing dynamic inner-product = " << timespec_diff_us(time_begin, time_end) << "us" << endl;

	//data copy from GPU to CPU
	hipMemcpy(outGPU, devOut, outSize/2, hipMemcpyDeviceToHost);
	hipMemcpy(outGPU+25000, devOut2, outSize/2, hipMemcpyDeviceToHost);

	//check
	if(checker())
		cout << "Congratulations! You pass the check." << endl;
	else
		cout << "Sorry! Your result is wrong." << endl;

	//release space (hipFree takes the device pointer itself)
	hipFree(devInputA);
	hipFree(devInputB);
	hipFree(devOut);
	hipFree(devOut2);

	delete [] outGPU;

	return 0;
}
ip_dynamic.cu
#include <stdio.h>
#include <sys/time.h>
#include <time.h>
#include <iostream>
#include <cstdlib>
using namespace std;

#define VECNUM 50000
#define VECLEN 1000
#define HALFNUM 25000000

int *inputA, *inputB;
int *devInputA, *devInputB, *devOut, *devOut2;
int *outCPU, *outGPU;

void init()
{
	delete [] outGPU;  // allocated with new[] below, so release with delete[]
	int i, j, idx;
	inputA = new int[VECNUM * VECLEN];
	inputB = new int[VECNUM * VECLEN];

	for(i = 0; i < VECNUM; i++){
		for(j = 0; j < VECLEN; j++){
			idx = i*VECLEN + j;
			if(idx<HALFNUM){
				if(idx%2==0){ //if(idx=even number) =>set value=1
					inputA[idx] =1;
					inputB[idx] =1;
				}
				else{ //if(idx=odd number) =>set value=0
					inputA[idx] =0;
					inputB[idx] =0;
				}
			}
			else{
				inputA[idx] =3;
				inputB[idx] =3;
			}
		}
	}

	outCPU = new int[VECNUM]();
	outGPU = new int[VECNUM]();
}

void initGPU()
{
	int inputSize = sizeof(int)*VECNUM*VECLEN;

	cudaMalloc(&devInputA, inputSize);
	cudaMalloc(&devInputB, inputSize);
	cudaMalloc(&devOut, sizeof(int)*VECNUM/2);
	cudaMalloc(&devOut2, sizeof(int)*VECNUM/2);

	cudaMemcpy(devInputA, inputA, inputSize, cudaMemcpyHostToDevice);
	cudaMemcpy(devInputB, inputB, inputSize, cudaMemcpyHostToDevice);
}

void innerProductCPU()
{
	int i, j, acc, idx;
	for(i = 0; i < VECNUM; i++){
		acc = 0;
		for(j = 0; j < VECLEN; j++){
			idx = i*VECLEN + j;
			acc += inputA[idx] * inputB[idx];
		}
		outCPU[i] = acc;
	}
}

//GPU dynamic version
__global__ void innerProductGPU_firsthalf(int *A, int *B, int *out)
{
	int y = blockIdx.x;
	int x = threadIdx.x;
	int idx = y * VECLEN + x*2; //just only compute the data that its idx is even number

	__shared__ int tmp[VECLEN];
	tmp[x*2] = A[idx] * B[idx];

	__syncthreads();

	if(x == 0){
		int i, sum = 0;
		for(i = 0; i < VECLEN/2; i++)
			sum += tmp[i*2];
		out[y] = sum;
	}
}

__global__ void innerProductGPU_secondhalf(int *A, int *B, int *out)
{
	int y = blockIdx.x;
	int x = threadIdx.x;
	int idx = y * VECLEN + x;

	__shared__ int tmp[VECLEN];
	tmp[x] = A[idx] * B[idx];

	__syncthreads();

	if(x == 0){
		int i, sum = 0;
		for(i = 0; i < VECLEN; i++)
			sum += tmp[i];
		out[y] = sum;
	}
}

__global__ void innerProductGPU_dynpar(int *A, int *B, int *out, int *A2, int *B2, int *out2)
{
	dim3 threadsPerBlock_firsthalf(VECLEN/2);
	dim3 threadsPerBlock_secondhalf(VECLEN);
	dim3 numBlocks_firsthalf(VECNUM/2);
	dim3 numBlocks_secondhalf(VECNUM/2);

	if(threadIdx.x == 0)
		innerProductGPU_firsthalf<<<numBlocks_firsthalf,threadsPerBlock_firsthalf>>>(A,B,out);
	else if (threadIdx.x == 1)
		innerProductGPU_secondhalf<<<numBlocks_secondhalf,threadsPerBlock_secondhalf>>>(A2,B2,out2);

	cudaDeviceSynchronize();
}

//other function
bool checker(){
	int i;
	for(i = 0; i < VECNUM; i++){
		if(outCPU[i] != outGPU[i]){
			cout << "The element: " << i << " is wrong!\n";
			cout << "outCPU[" << i << "] = " << outCPU[i] << endl;
			cout << "outGPU[" << i << "] = " << outGPU[i] << endl;
			return false;
		}
	}
	return true;
}

int timespec_diff_us(timespec& t1, timespec& t2)
{
	return (t2.tv_sec - t1.tv_sec) * 1e6 + (t2.tv_nsec - t1.tv_nsec) / 1e3;
}

int main(int argc, char** argv)
{
	int outSize = sizeof(int)*VECNUM;

	init();
	initGPU();

	timespec time_begin, time_end;

	//CPU version
	clock_gettime(CLOCK_REALTIME, &time_begin);
	innerProductCPU();
	clock_gettime(CLOCK_REALTIME, &time_end);
//	cout << "CPU time for executing inner-product = " << timespec_diff_us(time_begin, time_end) / 1000 << "ms" << endl;

	// GPU dynamic version
	dim3 threadsPerBlock(2);
	dim3 numBlocks(1);

	clock_gettime(CLOCK_REALTIME, &time_begin);
	innerProductGPU_dynpar<<<numBlocks,threadsPerBlock>>>(devInputA, devInputB, devOut, devInputA+HALFNUM, devInputB+HALFNUM, devOut2);
	cudaDeviceSynchronize();
	clock_gettime(CLOCK_REALTIME, &time_end);
	cout << "GPU time for executing dynamic inner-product = " << timespec_diff_us(time_begin, time_end) << "us" << endl;

	//data copy from GPU to CPU
	cudaMemcpy(outGPU, devOut, outSize/2, cudaMemcpyDeviceToHost);
	cudaMemcpy(outGPU+25000, devOut2, outSize/2, cudaMemcpyDeviceToHost);

	//check
	if(checker())
		cout << "Congratulations! You pass the check." << endl;
	else
		cout << "Sorry! Your result is wrong." << endl;

	//release space (cudaFree takes the device pointer itself)
	cudaFree(devInputA);
	cudaFree(devInputB);
	cudaFree(devOut);
	cudaFree(devOut2);

	delete [] outGPU;

	return 0;
}
88d0d1c347e242127dab7c278074d50d2f920e80.hip
// !!! This is a file automatically generated by hipify!!! // High level matrix multiplication on GPU using CUDA with Thrust, CURAND and CUBLAS // C(m,n) = A(m,k) * B(k,n) #include <iostream> #include <cstdlib> #include <ctime> #include <rocblas.h> #include <hiprand/hiprand.h> #include <array> #include <vector> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <cusolverDn.h> // Fill the array A(nr_rows_A, nr_cols_A) with random numbers on GPU void GPU_fill_rand(double *A, int nr_rows_A, int nr_cols_A) { // Create a pseudo-random number generator hiprandGenerator_t prng; hiprandCreateGenerator(&prng, HIPRAND_RNG_PSEUDO_DEFAULT); // Set the seed for the random number generator using the system clock hiprandSetPseudoRandomGeneratorSeed(prng, (unsigned long long) clock()); // Fill the array with random numbers on the device hiprandGenerateUniformDouble(prng, A, nr_rows_A * nr_cols_A); } // Multiply the arrays A and B on GPU and save the result in C // C(m,n) = A(m,k) * B(k,n) void gpu_blas_mmul(hipblasHandle_t* handle, const double *A, const double *B, double *C, const int m, const int k, const int n) { int lda=m,ldb=k,ldc=m; const double alf = 1; const double bet = 0; const double *alpha = &alf; const double *beta = &bet; // Create a handle for CUBLAS // hipblasHandle_t handle; // hipblasCreate(&handle); // Do the actual multiplication hipblasDgemm(*handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); // Destroy the handle // hipblasDestroy(*handle); } void gpu_blas_dsyrk(hipblasHandle_t* handle,const double *A, double *C, const int n, const int k) { int lda=k,ldc=k; const double alf = 1; const double bet = 0; const double *alpha = &alf; const double *beta = &bet; // Create a handle for CUBLAS // hipblasHandle_t handle; // hipblasCreate(&handle); // Do the actual multiplication hipblasDsyrk(*handle, HIPBLAS_FILL_MODE_UPPER, HIPBLAS_OP_T, n, k, alpha, A,lda, beta, C, ldc); // Destroy the handle // hipblasDestroy(*handle); } void gpu_solv_dpotrf(hipsolverDnHandle_t* handle,double *A, double* W,const int n, const int k, int* devInfo) { int lda=n; // Create a handle for CUBLAS // hipblasHandle_t handle; // hipblasCreate(&handle); // Do the actual multiplication hipsolverDnDpotrf(*handle, HIPBLAS_FILL_MODE_UPPER, n, A, lda, W, k, devInfo); // Destroy the handle // hipsolverDnDestroy(*handle); } void gpu_blas_dtrsm(hipblasHandle_t* handle, double *A, double *B, int a_b_rows, int a_b_cols){ int lda=a_b_rows,ldb=a_b_cols; const double alf = 1; const double *alpha = &alf; hipblasDtrsm(*handle, HIPBLAS_SIDE_LEFT, HIPBLAS_FILL_MODE_UPPER, HIPBLAS_OP_T, HIPBLAS_DIAG_NON_UNIT, a_b_rows, a_b_cols, alpha, A, lda, B, ldb); // Destroy the handle hipblasDestroy(*handle); } //Print matrix A(nr_rows_A, nr_cols_A) storage in column-major format void print_matrix(const thrust::device_vector<double> &A, int nr_rows_A, int nr_cols_A) { for(int i = 0; i < nr_rows_A; ++i){ for(int j = 0; j < nr_cols_A; ++j){ std::cout << A[j * nr_rows_A + i] << " "; } std::cout << std::endl; } std::cout << std::endl; } //int main() { // // int nr_rows_A, nr_cols_A, nr_rows_B, nr_cols_B, nr_rows_C, nr_cols_C; // // // for simplicity we are going to use square arrays // nr_rows_A = nr_cols_A = nr_rows_B = nr_cols_B = nr_rows_C = nr_cols_C = 512; // // thrust::device_vector<double> d_A(nr_rows_A * nr_cols_A), d_B(nr_rows_B * nr_cols_B), d_C(nr_rows_C * nr_cols_C); // // // Fill the Input arrays // GPU_fill_rand(thrust::raw_pointer_cast(&d_A[0]), nr_rows_A, nr_cols_A); // 
GPU_fill_rand(thrust::raw_pointer_cast(&d_B[0]), nr_rows_B, nr_cols_B); // // // bool potrfMode = false; // // int batch_count = 10; // // std::array<thrust::device_vector<double>,10> answerVectors; // std::array<thrust::device_vector<double>,10> answerVectorsB; // // std::array<hipblasHandle_t, 10> handles; // std::array<hipStream_t, 10> streams; // // std::array<hipblasHandle_t, 10> handlesB; // std::array<hipStream_t, 10> streamsB; // // // std::array<hipsolverDnHandle_t, 10> handlesPotrf; // // std::array<hipStream_t, 10> streamsPotrf; // // std::array<int, 10> potrfAns; // // hipStream_t *streams = (hipStream_t *) malloc(batch_count*sizeof(hipStream_t)); // // // hipsolverDnHandle_t handlePotrf; // // hipsolverDnCreate(&handlePotrf); // // int workspaceSize = -1; // // hipsolverDnDpotrf_bufferSize(handlePotrf,HIPBLAS_FILL_MODE_UPPER,nr_rows_A,thrust::raw_pointer_cast(&d_A[0]),nr_rows_A,&workspaceSize ); // // nr_rows_C = nr_cols_C = workspaceSize; // for(int i=0; i<batch_count; i++) // { // std::cout << "batch " << i << " initializing"<<std::endl; // // hipStreamCreate(&streams[i]); // hipblasCreate(&handles[i]); // // hipStreamCreate(&streamsB[i]); // hipblasCreate(&handlesB[i]); // // hipblasSetStream(handles[i], streams[i]); // // hipblasSetStream(handlesB[i], streamsB[i]); // // // hipStreamCreate(&streamsPotrf[i]); // // hipsolverDnCreate(&handlesPotrf[i]); // // hipsolverDnSetStream(handlesPotrf[i], streamsPotrf[i]); // // thrust::device_vector<double> d_C(nr_rows_C * nr_cols_C); // thrust::device_vector<double> d_C2(nr_rows_C * nr_cols_C); // // thrust::device_vector<double> d_C2(workspaceSize); // // answerVectors[i] = d_C; // answerVectorsB[i] = d_C2; // } // // // for(int i=0; i<batch_count; i++){ // // Set CUDA stream // // std::cout << "passou aqui" << std::endl; // // // DGEMM: C = alpha*A*B + beta*C // gpu_blas_mmul(&handles[i],thrust::raw_pointer_cast(&d_A[0]), thrust::raw_pointer_cast(&d_B[0]), thrust::raw_pointer_cast(&answerVectors[i][0]), nr_rows_A, nr_cols_A, nr_cols_B); // // // gpu_blas_dsyrk(&handlesB[i],thrust::raw_pointer_cast(&d_A[0]), thrust::raw_pointer_cast(&answerVectorsB[i][0]), nr_rows_A, nr_cols_C); // // // gpu_solv_dpotrf(&handlesPotrf[i],thrust::raw_pointer_cast(&d_A[0]), thrust::raw_pointer_cast(&answerVectorsB[i][0]), nr_rows_A, workspaceSize, &potrfAns[i]); // // gpu_blas_dtrsm(&handlesB[i],thrust::raw_pointer_cast(&d_A[0]), thrust::raw_pointer_cast(&d_B[0]), nr_rows_A, nr_cols_B); // } // // // // // std::cout << "finished" <<std::endl; // // return 0; //}
88d0d1c347e242127dab7c278074d50d2f920e80.cu
// High level matrix multiplication on GPU using CUDA with Thrust, CURAND and CUBLAS // C(m,n) = A(m,k) * B(k,n) #include <iostream> #include <cstdlib> #include <ctime> #include <cublas_v2.h> #include <curand.h> #include <array> #include <vector> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <cusolverDn.h> // Fill the array A(nr_rows_A, nr_cols_A) with random numbers on GPU void GPU_fill_rand(double *A, int nr_rows_A, int nr_cols_A) { // Create a pseudo-random number generator curandGenerator_t prng; curandCreateGenerator(&prng, CURAND_RNG_PSEUDO_DEFAULT); // Set the seed for the random number generator using the system clock curandSetPseudoRandomGeneratorSeed(prng, (unsigned long long) clock()); // Fill the array with random numbers on the device curandGenerateUniformDouble(prng, A, nr_rows_A * nr_cols_A); } // Multiply the arrays A and B on GPU and save the result in C // C(m,n) = A(m,k) * B(k,n) void gpu_blas_mmul(cublasHandle_t* handle, const double *A, const double *B, double *C, const int m, const int k, const int n) { int lda=m,ldb=k,ldc=m; const double alf = 1; const double bet = 0; const double *alpha = &alf; const double *beta = &bet; // Create a handle for CUBLAS // cublasHandle_t handle; // cublasCreate(&handle); // Do the actual multiplication cublasDgemm(*handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); // Destroy the handle // cublasDestroy(*handle); } void gpu_blas_dsyrk(cublasHandle_t* handle,const double *A, double *C, const int n, const int k) { int lda=k,ldc=k; const double alf = 1; const double bet = 0; const double *alpha = &alf; const double *beta = &bet; // Create a handle for CUBLAS // cublasHandle_t handle; // cublasCreate(&handle); // Do the actual multiplication cublasDsyrk(*handle, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_T, n, k, alpha, A,lda, beta, C, ldc); // Destroy the handle // cublasDestroy(*handle); } void gpu_solv_dpotrf(cusolverDnHandle_t* handle,double *A, double* W,const int n, const int k, int* devInfo) { int lda=n; // Create a handle for CUBLAS // cublasHandle_t handle; // cublasCreate(&handle); // Do the actual multiplication cusolverDnDpotrf(*handle, CUBLAS_FILL_MODE_UPPER, n, A, lda, W, k, devInfo); // Destroy the handle // cusolverDnDestroy(*handle); } void gpu_blas_dtrsm(cublasHandle_t* handle, double *A, double *B, int a_b_rows, int a_b_cols){ int lda=a_b_rows,ldb=a_b_cols; const double alf = 1; const double *alpha = &alf; cublasDtrsm(*handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_T, CUBLAS_DIAG_NON_UNIT, a_b_rows, a_b_cols, alpha, A, lda, B, ldb); // Destroy the handle cublasDestroy(*handle); } //Print matrix A(nr_rows_A, nr_cols_A) storage in column-major format void print_matrix(const thrust::device_vector<double> &A, int nr_rows_A, int nr_cols_A) { for(int i = 0; i < nr_rows_A; ++i){ for(int j = 0; j < nr_cols_A; ++j){ std::cout << A[j * nr_rows_A + i] << " "; } std::cout << std::endl; } std::cout << std::endl; } //int main() { // // int nr_rows_A, nr_cols_A, nr_rows_B, nr_cols_B, nr_rows_C, nr_cols_C; // // // for simplicity we are going to use square arrays // nr_rows_A = nr_cols_A = nr_rows_B = nr_cols_B = nr_rows_C = nr_cols_C = 512; // // thrust::device_vector<double> d_A(nr_rows_A * nr_cols_A), d_B(nr_rows_B * nr_cols_B), d_C(nr_rows_C * nr_cols_C); // // // Fill the Input arrays // GPU_fill_rand(thrust::raw_pointer_cast(&d_A[0]), nr_rows_A, nr_cols_A); // GPU_fill_rand(thrust::raw_pointer_cast(&d_B[0]), nr_rows_B, nr_cols_B); // // // bool potrfMode = false; // // 
int batch_count = 10; // // std::array<thrust::device_vector<double>,10> answerVectors; // std::array<thrust::device_vector<double>,10> answerVectorsB; // // std::array<cublasHandle_t, 10> handles; // std::array<cudaStream_t, 10> streams; // // std::array<cublasHandle_t, 10> handlesB; // std::array<cudaStream_t, 10> streamsB; // // // std::array<cusolverDnHandle_t, 10> handlesPotrf; // // std::array<cudaStream_t, 10> streamsPotrf; // // std::array<int, 10> potrfAns; // // cudaStream_t *streams = (cudaStream_t *) malloc(batch_count*sizeof(cudaStream_t)); // // // cusolverDnHandle_t handlePotrf; // // cusolverDnCreate(&handlePotrf); // // int workspaceSize = -1; // // cusolverDnDpotrf_bufferSize(handlePotrf,CUBLAS_FILL_MODE_UPPER,nr_rows_A,thrust::raw_pointer_cast(&d_A[0]),nr_rows_A,&workspaceSize ); // // nr_rows_C = nr_cols_C = workspaceSize; // for(int i=0; i<batch_count; i++) // { // std::cout << "batch " << i << " initializing"<<std::endl; // // cudaStreamCreate(&streams[i]); // cublasCreate(&handles[i]); // // cudaStreamCreate(&streamsB[i]); // cublasCreate(&handlesB[i]); // // cublasSetStream(handles[i], streams[i]); // // cublasSetStream(handlesB[i], streamsB[i]); // // // cudaStreamCreate(&streamsPotrf[i]); // // cusolverDnCreate(&handlesPotrf[i]); // // cusolverDnSetStream(handlesPotrf[i], streamsPotrf[i]); // // thrust::device_vector<double> d_C(nr_rows_C * nr_cols_C); // thrust::device_vector<double> d_C2(nr_rows_C * nr_cols_C); // // thrust::device_vector<double> d_C2(workspaceSize); // // answerVectors[i] = d_C; // answerVectorsB[i] = d_C2; // } // // // for(int i=0; i<batch_count; i++){ // // Set CUDA stream // // std::cout << "passou aqui" << std::endl; // // // DGEMM: C = alpha*A*B + beta*C // gpu_blas_mmul(&handles[i],thrust::raw_pointer_cast(&d_A[0]), thrust::raw_pointer_cast(&d_B[0]), thrust::raw_pointer_cast(&answerVectors[i][0]), nr_rows_A, nr_cols_A, nr_cols_B); // // // gpu_blas_dsyrk(&handlesB[i],thrust::raw_pointer_cast(&d_A[0]), thrust::raw_pointer_cast(&answerVectorsB[i][0]), nr_rows_A, nr_cols_C); // // // gpu_solv_dpotrf(&handlesPotrf[i],thrust::raw_pointer_cast(&d_A[0]), thrust::raw_pointer_cast(&answerVectorsB[i][0]), nr_rows_A, workspaceSize, &potrfAns[i]); // // gpu_blas_dtrsm(&handlesB[i],thrust::raw_pointer_cast(&d_A[0]), thrust::raw_pointer_cast(&d_B[0]), nr_rows_A, nr_cols_B); // } // // // // // std::cout << "finished" <<std::endl; // // return 0; //}
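// One asymmetry worth keeping in mind when using the wrappers above (an observation, not
// from the original source): gpu_blas_mmul, gpu_blas_dsyrk and gpu_solv_dpotrf leave the
// caller-supplied handle alone, but gpu_blas_dtrsm calls cublasDestroy(*handle) before
// returning, so the handle passed to it cannot be reused afterwards. A sketch of the
// resulting call pattern, assuming the signatures above:
//   cublasHandle_t h;
//   cublasCreate(&h);
//   gpu_blas_dtrsm(&h, d_A, d_B, rows, cols);  // h is destroyed inside the wrapper
//   // do not call cublasDestroy(h) or reuse h after this point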
7c70b43c34d0ae369a17219f0b1893e373e77c17.hip
// !!! This is a file automatically generated by hipify!!! #include <vector> #include "caffe/blob.hpp" #include "caffe/layers/EigenLog_layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/layer.hpp" #include "caffe/proto/caffe.pb.h" #include "caffe/layers/loss_layer.hpp" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include<iostream> #include<stdlib.h> #include<stdio.h> #include <cusolverDn.h> #include <hip/hip_runtime_api.h> #include <algorithm> namespace caffe { template <typename Dtype> void EigenLogLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int dim = bottom[0]->num(); // --- CUDA solver initialization int work_size = 0; hipsolverDnHandle_t solver_handle; hipsolverDnCreate(&solver_handle); hipsolverDnSgesvd_bufferSize(solver_handle, dim, dim, &work_size); // --- CUDA SVD execution float *work; int *devInfo; hipMalloc(&devInfo, sizeof(int)); hipMalloc(&work, work_size * sizeof(float)); caffe_gpu_memcpy(bottom[0]->count() * sizeof(Dtype), bottom[0]->gpu_data(), cov.mutable_gpu_data()); hipsolverDnSgesvd(solver_handle, 'A', 'A', (int)dim, (int)dim, (float *)cov.mutable_gpu_data(), (int)dim, (float *)eig.mutable_gpu_data(), (float *)U.mutable_gpu_data(), (int)dim,(float *)V.mutable_gpu_data(), (int)dim, work, work_size, NULL, devInfo); hipDeviceSynchronize(); hipsolverDnDestroy(solver_handle); int i; Dtype* eig_pointer=eig.mutable_cpu_data(); for(i=0;i<dim;i++){ eig_pointer[i] = eig_pointer[i]+0.001; caffe_log(1., &eig_pointer[i], &eig_pointer[i]); } //eig: get the log value of eigenvalue caffe_gpu_set(dim*dim, Dtype(0), eig_matx.mutable_gpu_data()); Dtype* eig_matx_pointer=eig_matx.mutable_cpu_data(); for(i=0;i<dim;i++){ eig_matx_pointer[i*dim+i]=eig_pointer[i];} // turn log(eigenvalue) into a matrix caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,dim,dim,dim,1.,U.gpu_data(),eig_matx.gpu_data(),0.,eig_matx.mutable_gpu_data()); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,dim,dim,dim,1.0,eig_matx.gpu_data(),U.gpu_data(),0.,top[0]->mutable_gpu_data()); // U*log(eigenvalue)*UT } template <typename Dtype> void EigenLogLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { int dim = bottom[0]->num(); // --- CUDA solver initialization int work_size = 0; hipsolverDnHandle_t solver_handle; hipsolverDnCreate(&solver_handle); hipsolverDnSgesvd_bufferSize(solver_handle, dim, dim, &work_size); // --- CUDA SVD execution float *work; int *devInfo; hipMalloc(&devInfo, sizeof(int)); hipMalloc(&work, work_size * sizeof(float)); caffe_gpu_memcpy(bottom[0]->count() * sizeof(Dtype), bottom[0]->gpu_data(), cov.mutable_gpu_data()); hipsolverDnSgesvd(solver_handle, 'A', 'A', (int)dim, (int)dim, (float *)cov.mutable_gpu_data(), (int)dim, (float *)eig.mutable_gpu_data(), (float *)U.mutable_gpu_data(), (int)dim,(float *)V.mutable_gpu_data(), (int)dim, work, work_size, NULL, devInfo); hipDeviceSynchronize(); hipsolverDnDestroy(solver_handle); int i,j; Dtype* eig_pointer=eig.mutable_cpu_data(); Dtype* eig_log_pointer=eig_log.mutable_cpu_data(); Dtype* eig_inv_pointer=eig_inv.mutable_cpu_data(); for(i=0;i<dim;i++){ eig_inv_pointer[i]=eig_pointer[i]; if(eig_pointer[i]!=0){ eig_inv_pointer[i]=1./eig_pointer[i];} //eig_inv_pointer: get the inverse of eigenvalue eig_log_pointer[i]=eig_pointer[i]; eig_pointer[i]= eig_pointer[i]+0.001; caffe_log(1., &eig_pointer[i], &eig_log_pointer[i]); //eig_log_pointer: get the log of 
eigenvalue } caffe_gpu_set(dim*dim, Dtype(0), eig_matx.mutable_gpu_data()); caffe_gpu_set(dim*dim, Dtype(0), eig_log_matx.mutable_gpu_data()); caffe_gpu_set(dim*dim, Dtype(0), eig_inv_matx.mutable_gpu_data()); caffe_gpu_set(dim*dim, Dtype(0), iden_matx.mutable_gpu_data()); Dtype* eig_matx_pointer=eig_matx.mutable_cpu_data(); Dtype* eig_log_matx_pointer=eig_log_matx.mutable_cpu_data(); Dtype* eig_inv_matx_pointer=eig_inv_matx.mutable_cpu_data(); Dtype* iden_matx_pointer=iden_matx.mutable_cpu_data(); for(i=0;i<dim;i++){ eig_matx_pointer[i*dim+i]=eig_pointer[i]; //eig_matx: get eigenvalue matrix eig_log_matx_pointer[i*dim+i]=eig_log_pointer[i]; //eig_log_matx: get log(eigenvalue) matrix eig_inv_matx_pointer[i*dim+i]=eig_inv_pointer[i]; //eig_inv_pointer: get the inverse of eigenval matrix iden_matx_pointer[i*dim+i]=1.; //iden_matrix: get identity matrix } caffe_gpu_gemm<Dtype>(CblasTrans,CblasNoTrans,dim,dim,dim,1.,U.gpu_data(),iden_matx.gpu_data(),0.,U.mutable_gpu_data()); caffe_gpu_gemm<Dtype>(CblasNoTrans,CblasNoTrans,dim,dim,dim,0.5,top[0]->gpu_diff(),iden_matx.gpu_data(),0.,diff_sys.mutable_gpu_data()); caffe_gpu_gemm<Dtype>(CblasTrans,CblasNoTrans,dim,dim,dim,0.5,top[0]->gpu_diff(),iden_matx.gpu_data(),1.,diff_sys.mutable_gpu_data()); // diff_sys: sys(top[0]->difference) caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,dim,dim,dim,2.,diff_sys.gpu_data(),U.gpu_data(),0.,dU.mutable_gpu_data()); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,dim,dim,dim,1.,dU.gpu_data(),eig_log_matx.gpu_data(),0.,dU.mutable_gpu_data()); //dU function(7) in the paper caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans,dim,dim,dim,1.,eig_inv_matx.gpu_data(),U.gpu_data(),0.,deigen.mutable_gpu_data()); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,dim,dim,dim,1.,deigen.gpu_data(),diff_sys.gpu_data(),0.,deigen.mutable_gpu_data()); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,dim,dim,dim,1.,deigen.gpu_data(),U.gpu_data(),0.,deigen.mutable_gpu_data()); //deigen function(8) in the paper Dtype* deigen_pointer=deigen.mutable_cpu_data(); caffe_gpu_set(dim*dim, Dtype(0), P.mutable_gpu_data()); Dtype* P_pointer=P.mutable_cpu_data(); for(i=0;i<dim;i++){ for(j=0;j<dim;j++){ if(i!=j){ if(eig_pointer[i]!=eig_pointer[j]){ P_pointer[i*dim+j]=1./(eig_pointer[i]-eig_pointer[j]); //P }} }} caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,dim,dim,dim,1.,P.mutable_gpu_data(),iden_matx.gpu_data(),0.,PT.mutable_gpu_data()); //PT: P(transpose) caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,dim,dim,dim,1.,U.gpu_data(),dU.gpu_data(),0.,sys.mutable_gpu_data()); caffe_gpu_mul(dim*dim,PT.gpu_data(),sys.gpu_data(),P_sys.mutable_gpu_data()); caffe_gpu_gemm<Dtype>(CblasTrans,CblasNoTrans,dim,dim,dim,0.5,P_sys.gpu_data(),iden_matx.gpu_data(),0.5,P_sys.mutable_gpu_data()); // (PT o (U*UT))sym for(i=0;i<dim;i++){ for(j=0;j<dim;j++){ if(i!=j){ deigen_pointer[i*dim+j]=0.; } }} // (deigen)diag caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,dim,dim,dim,1.,U.gpu_data(),P_sys.gpu_data(),0.,temp1.mutable_gpu_data()); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans,dim,dim,dim,1.0,temp1.gpu_data(),U.gpu_data(),0.,temp1.mutable_gpu_data()); // U*(PT o (U*UT))sym*UT caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,dim,dim,dim,1.,U.gpu_data(),deigen.gpu_data(),0.,temp2.mutable_gpu_data()); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans,dim,dim,dim,1.0,temp2.gpu_data(),U.gpu_data(),0.,temp2.mutable_gpu_data()); // U*(deigen)diag*UT caffe_gpu_add(dim*dim,temp1.gpu_data(),temp2.gpu_data(),bottom[0]->mutable_gpu_diff()); //function(6) in the paper } 
INSTANTIATE_LAYER_GPU_FUNCS(EigenLogLayer); } // namespace caffe
7c70b43c34d0ae369a17219f0b1893e373e77c17.cu
#include <vector> #include "caffe/blob.hpp" #include "caffe/layers/EigenLog_layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/layer.hpp" #include "caffe/proto/caffe.pb.h" #include "caffe/layers/loss_layer.hpp" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include<iostream> #include<stdlib.h> #include<stdio.h> #include <cusolverDn.h> #include <cuda_runtime_api.h> #include <algorithm> namespace caffe { template <typename Dtype> void EigenLogLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int dim = bottom[0]->num(); // --- CUDA solver initialization int work_size = 0; cusolverDnHandle_t solver_handle; cusolverDnCreate(&solver_handle); cusolverDnSgesvd_bufferSize(solver_handle, dim, dim, &work_size); // --- CUDA SVD execution float *work; int *devInfo; cudaMalloc(&devInfo, sizeof(int)); cudaMalloc(&work, work_size * sizeof(float)); caffe_gpu_memcpy(bottom[0]->count() * sizeof(Dtype), bottom[0]->gpu_data(), cov.mutable_gpu_data()); cusolverDnSgesvd(solver_handle, 'A', 'A', (int)dim, (int)dim, (float *)cov.mutable_gpu_data(), (int)dim, (float *)eig.mutable_gpu_data(), (float *)U.mutable_gpu_data(), (int)dim,(float *)V.mutable_gpu_data(), (int)dim, work, work_size, NULL, devInfo); cudaDeviceSynchronize(); cusolverDnDestroy(solver_handle); int i; Dtype* eig_pointer=eig.mutable_cpu_data(); for(i=0;i<dim;i++){ eig_pointer[i] = eig_pointer[i]+0.001; caffe_log(1., &eig_pointer[i], &eig_pointer[i]); } //eig: get the log value of eigenvalue caffe_gpu_set(dim*dim, Dtype(0), eig_matx.mutable_gpu_data()); Dtype* eig_matx_pointer=eig_matx.mutable_cpu_data(); for(i=0;i<dim;i++){ eig_matx_pointer[i*dim+i]=eig_pointer[i];} // turn log(eigenvalue) into a matrix caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,dim,dim,dim,1.,U.gpu_data(),eig_matx.gpu_data(),0.,eig_matx.mutable_gpu_data()); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,dim,dim,dim,1.0,eig_matx.gpu_data(),U.gpu_data(),0.,top[0]->mutable_gpu_data()); // U*log(eigenvalue)*UT } template <typename Dtype> void EigenLogLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { int dim = bottom[0]->num(); // --- CUDA solver initialization int work_size = 0; cusolverDnHandle_t solver_handle; cusolverDnCreate(&solver_handle); cusolverDnSgesvd_bufferSize(solver_handle, dim, dim, &work_size); // --- CUDA SVD execution float *work; int *devInfo; cudaMalloc(&devInfo, sizeof(int)); cudaMalloc(&work, work_size * sizeof(float)); caffe_gpu_memcpy(bottom[0]->count() * sizeof(Dtype), bottom[0]->gpu_data(), cov.mutable_gpu_data()); cusolverDnSgesvd(solver_handle, 'A', 'A', (int)dim, (int)dim, (float *)cov.mutable_gpu_data(), (int)dim, (float *)eig.mutable_gpu_data(), (float *)U.mutable_gpu_data(), (int)dim,(float *)V.mutable_gpu_data(), (int)dim, work, work_size, NULL, devInfo); cudaDeviceSynchronize(); cusolverDnDestroy(solver_handle); int i,j; Dtype* eig_pointer=eig.mutable_cpu_data(); Dtype* eig_log_pointer=eig_log.mutable_cpu_data(); Dtype* eig_inv_pointer=eig_inv.mutable_cpu_data(); for(i=0;i<dim;i++){ eig_inv_pointer[i]=eig_pointer[i]; if(eig_pointer[i]!=0){ eig_inv_pointer[i]=1./eig_pointer[i];} //eig_inv_pointer: get the inverse of eigenvalue eig_log_pointer[i]=eig_pointer[i]; eig_pointer[i]= eig_pointer[i]+0.001; caffe_log(1., &eig_pointer[i], &eig_log_pointer[i]); //eig_log_pointer: get the log of eigenvalue } caffe_gpu_set(dim*dim, Dtype(0), eig_matx.mutable_gpu_data()); 
caffe_gpu_set(dim*dim, Dtype(0), eig_log_matx.mutable_gpu_data()); caffe_gpu_set(dim*dim, Dtype(0), eig_inv_matx.mutable_gpu_data()); caffe_gpu_set(dim*dim, Dtype(0), iden_matx.mutable_gpu_data()); Dtype* eig_matx_pointer=eig_matx.mutable_cpu_data(); Dtype* eig_log_matx_pointer=eig_log_matx.mutable_cpu_data(); Dtype* eig_inv_matx_pointer=eig_inv_matx.mutable_cpu_data(); Dtype* iden_matx_pointer=iden_matx.mutable_cpu_data(); for(i=0;i<dim;i++){ eig_matx_pointer[i*dim+i]=eig_pointer[i]; //eig_matx: get eigenvalue matrix eig_log_matx_pointer[i*dim+i]=eig_log_pointer[i]; //eig_log_matx: get log(eigenvalue) matrix eig_inv_matx_pointer[i*dim+i]=eig_inv_pointer[i]; //eig_inv_pointer: get the inverse of eigenval matrix iden_matx_pointer[i*dim+i]=1.; //iden_matrix: get identity matrix } caffe_gpu_gemm<Dtype>(CblasTrans,CblasNoTrans,dim,dim,dim,1.,U.gpu_data(),iden_matx.gpu_data(),0.,U.mutable_gpu_data()); caffe_gpu_gemm<Dtype>(CblasNoTrans,CblasNoTrans,dim,dim,dim,0.5,top[0]->gpu_diff(),iden_matx.gpu_data(),0.,diff_sys.mutable_gpu_data()); caffe_gpu_gemm<Dtype>(CblasTrans,CblasNoTrans,dim,dim,dim,0.5,top[0]->gpu_diff(),iden_matx.gpu_data(),1.,diff_sys.mutable_gpu_data()); // diff_sys: sys(top[0]->difference) caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,dim,dim,dim,2.,diff_sys.gpu_data(),U.gpu_data(),0.,dU.mutable_gpu_data()); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,dim,dim,dim,1.,dU.gpu_data(),eig_log_matx.gpu_data(),0.,dU.mutable_gpu_data()); //dU function(7) in the paper caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans,dim,dim,dim,1.,eig_inv_matx.gpu_data(),U.gpu_data(),0.,deigen.mutable_gpu_data()); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,dim,dim,dim,1.,deigen.gpu_data(),diff_sys.gpu_data(),0.,deigen.mutable_gpu_data()); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,dim,dim,dim,1.,deigen.gpu_data(),U.gpu_data(),0.,deigen.mutable_gpu_data()); //deigen function(8) in the paper Dtype* deigen_pointer=deigen.mutable_cpu_data(); caffe_gpu_set(dim*dim, Dtype(0), P.mutable_gpu_data()); Dtype* P_pointer=P.mutable_cpu_data(); for(i=0;i<dim;i++){ for(j=0;j<dim;j++){ if(i!=j){ if(eig_pointer[i]!=eig_pointer[j]){ P_pointer[i*dim+j]=1./(eig_pointer[i]-eig_pointer[j]); //P }} }} caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,dim,dim,dim,1.,P.mutable_gpu_data(),iden_matx.gpu_data(),0.,PT.mutable_gpu_data()); //PT: P(transpose) caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,dim,dim,dim,1.,U.gpu_data(),dU.gpu_data(),0.,sys.mutable_gpu_data()); caffe_gpu_mul(dim*dim,PT.gpu_data(),sys.gpu_data(),P_sys.mutable_gpu_data()); caffe_gpu_gemm<Dtype>(CblasTrans,CblasNoTrans,dim,dim,dim,0.5,P_sys.gpu_data(),iden_matx.gpu_data(),0.5,P_sys.mutable_gpu_data()); // (PT o (U*UT))sym for(i=0;i<dim;i++){ for(j=0;j<dim;j++){ if(i!=j){ deigen_pointer[i*dim+j]=0.; } }} // (deigen)diag caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,dim,dim,dim,1.,U.gpu_data(),P_sys.gpu_data(),0.,temp1.mutable_gpu_data()); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans,dim,dim,dim,1.0,temp1.gpu_data(),U.gpu_data(),0.,temp1.mutable_gpu_data()); // U*(PT o (U*UT))sym*UT caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,dim,dim,dim,1.,U.gpu_data(),deigen.gpu_data(),0.,temp2.mutable_gpu_data()); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans,dim,dim,dim,1.0,temp2.gpu_data(),U.gpu_data(),0.,temp2.mutable_gpu_data()); // U*(deigen)diag*UT caffe_gpu_add(dim*dim,temp1.gpu_data(),temp2.gpu_data(),bottom[0]->mutable_gpu_diff()); //function(6) in the paper } INSTANTIATE_LAYER_GPU_FUNCS(EigenLogLayer); } // namespace caffe
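Both EigenLog implementations above allocate the cuSOLVER workspace (`work`) and the status word (`devInfo`) before calling `cusolverDnSgesvd`, but never inspect or release them. The helper below is only an illustrative sketch of the missing check/cleanup, not part of the original layer; the name `checkAndFreeSvdScratch` is invented here, and the headers listed are already pulled in by the file above. If used, it would be called once right after each `cusolverDnSgesvd` call.

#include <cuda_runtime.h>   // cudaMemcpy, cudaFree (already included by the layer above)
#include <iostream>         // std::cerr (already included by the layer above)

// Hypothetical helper: copy back the cuSOLVER status word, report a failure,
// and release the scratch buffers allocated before cusolverDnSgesvd.
static void checkAndFreeSvdScratch(int* devInfo, float* work) {
  int info_h = 0;
  cudaMemcpy(&info_h, devInfo, sizeof(int), cudaMemcpyDeviceToHost);
  if (info_h != 0) {
    std::cerr << "cusolverDnSgesvd returned devInfo = " << info_h << std::endl;
  }
  cudaFree(work);
  cudaFree(devInfo);
}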
58dbcb11c0b4e94a30afef264cc08380810dc4a1.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <sys/types.h> #include <sys/stat.h> #include <vector> #include <string> #include <cstdlib> #include "CImg.h" #include "gpuProcessImage.cu" #include "Utils.h" #include "cpuProcessImage.h" #include "Constants.h" int main(int argc, char* argv[]) { std::cout << "-------------------RUNNING IMAGE FILTERS APP ON GPU---------------------------" << std::endl; gpu::Options options; bool validArguments = parseCommandLine(argc, argv, &options); if(validArguments) { std::vector<std::string> fileList; std::string directoryPath = options.directoryPath; gpu::readDirectory(directoryPath,&fileList); std::vector<std::string>::iterator it = fileList.begin(); int count = 0; int* warmupBuffer =(int*) calloc(1000, sizeof(int)); timeval tim; double dTime1 = gpu::getTime(tim); std::cout << "Sending warm up signal to GPU." << std::endl; sendWarmUpSignal(warmupBuffer,1000*sizeof(int)); double dTime2 = gpu::getTime(tim); double warmupTime = dTime2 - dTime1; std::cout << "time taken for performing warm up: " << warmupTime << std::endl; delete[] warmupBuffer; std::cout << "Starting File I/O using CPU." << std::endl; double fileIOTime = 0.0; double configurationTime = 0.0; double executionTime = 0.0; for(;it != fileList.end();++it) { std::string imageFilename = directoryPath+*it; std::string extension = imageFilename.substr(imageFilename.length()-4,4); if(extension == JPG_EXTENSION || extension == JPEG_EXTENSION) { std::string filename = imageFilename.substr(0,imageFilename.length()-4); int indexOfSlash = imageFilename.find_last_of("/"); std::cout << "filename: " << filename << std::endl; std::cout << "Reading Image from Disk." << std::endl; dTime1 = gpu::getTime(tim); std::string outputFilename = imageFilename.substr(indexOfSlash, imageFilename.length()-4-indexOfSlash); CImg<unsigned char> image(imageFilename.c_str()); dTime2 = gpu::getTime(tim); fileIOTime += dTime2 - dTime1; std::cout << "Time taken to read from disk: " << dTime2 - dTime1 << std::endl; dTime1 = gpu::getTime(tim); std::cout << "Unrolling Image and setting up blocks and threads." << std::endl; int width = image.width(); int height = image.height(); int channels = image.spectrum(); gpu::Image imgInfo(image.width(),image.height(),image.width()*image.height(),image.spectrum()); /* <summary> 1. Allocate Buffers 2. Get Meta information from the image and assign that to ImageInfo object. 3. Copy image into Input Buffer (unroll operation). 4. Perform the operation. */ unsigned char* h_data = new unsigned char[imgInfo.spectrum*imgInfo.size]; unsigned char* h_result = new unsigned char[imgInfo.spectrum*imgInfo.size]; gpu::unrollMatrix(image,imgInfo.width,imgInfo.height,imgInfo.spectrum, h_data); int problemSize, sizeData, sizeResult; gpu::Setup setup; startSetup(width, height, channels,&problemSize, &sizeData, &sizeResult, &setup); dTime2 = gpu::getTime(tim); configurationTime += dTime2 - dTime1; std::cout << "Blocks: " << setup.blocks << ", Threads: " << setup.threads << std::endl; std::cout << "Done configuring the problem.\nTime taken: " << dTime2 - dTime1 << std::endl; dTime1 = gpu::getTime(tim); std::cout << "Starting memory allocation and data transfer from Host to Device." 
<< std::endl; unsigned char *d_data; hipMalloc((void**)&d_data,sizeData); hipMemcpy(d_data,h_data,sizeData,hipMemcpyHostToDevice); unsigned char* d_result; hipMalloc((void**)&d_result,sizeData); dTime2 = gpu::getTime(tim); std::cout << "Done transferring data.\nTime taken: " << dTime2 - dTime1 <<std::endl; dTime1 = gpu::getTime(tim); std::cout << "Begining execution on GPU." << std::endl; int offset = width*height; std::cout << "Applying Filter..." << std::endl; int* d_kernel; std::cout << "filter size: " << options.kernelSize << std::endl; if(options.isConvolutionOp) { hipMalloc((void**)&d_kernel,options.kernelSize*options.kernelSize*sizeof(int)); hipMemcpy(d_kernel,options.convolutionKernel,options.kernelSize*options.kernelSize*sizeof(int),hipMemcpyHostToDevice); } switch(options.filterFlag) { case gpu::BRIGHTNESS: runBrightnessKernel(setup,d_data,d_result, width,height,channels,offset); break; case gpu::CONTRAST: runContrastKernel(setup,d_data,d_result, width,height,channels,offset); break; case gpu::CONVOLUTION: runConvolutionKernel(setup,d_data,d_result, d_kernel,options.kernelSize, width,height,channels,offset); break; case gpu::BLEND: runBlendKernel(setup,d_data,d_data,d_result, width,height,channels,offset,1.2f,options.blendMode); break; case gpu::SATURATION: runSaturationKernel(setup,d_data,d_result, width,height,channels,offset); break; case gpu::SEPIA: runSepiaKernel(setup,d_data,d_result, width,height,channels,offset); break; case gpu::BLACKWHITE: std::cout << "black n white kernel----------" <<std::endl; runBWKernel(setup,d_data,d_result, width,height,channels,offset); break; case gpu::BRIGHTNESS_CONTRAST: runBrightnessKernel(setup,d_data,d_result, width,height,channels,offset); runContrastKernel(setup,d_result,d_result, width,height,channels,offset); break; case gpu::BLACKWHITE_BRIGHTNESS: runBWKernel(setup,d_data,d_result, width,height,channels,offset); runContrastKernel(setup,d_result,d_result, width,height,channels,offset); break; case gpu::BRIGHTNESS_SATURATION: runBrightnessKernel(setup,d_data,d_result, width,height,channels,offset); runSaturationKernel(setup,d_result,d_result, width,height,channels,offset); break; case gpu::CONTRAST_SEPIA: runContrastKernel(setup,d_data,d_result, width,height,channels,offset); runSepiaKernel(setup,d_result,d_result, width,height,channels,offset); break; } dTime2 = gpu::getTime(tim); executionTime += dTime2 - dTime1; std::cout << "Done with execution on GPU.\nTime Taken: " << dTime2 - dTime1 << std::endl; dTime1 = dTime2; hipMemcpy(h_result,d_result,sizeResult,hipMemcpyDeviceToHost); hipFree(d_data); hipFree(d_result); if(options.isConvolutionOp) { hipFree(d_kernel); } dTime2 = gpu::getTime(tim); configurationTime += dTime2 - dTime1; std::cout << "Data transferred back to Host.\nTime taken: " << dTime2 - dTime1 << std::endl; CImg<unsigned char> outputImage(h_result,imgInfo.width,imgInfo.height,1, imgInfo.spectrum,0); dTime1 = dTime2; std::cout << "Writing to Disk"<<std::endl; std::string outputDirectory = directoryPath+"output/"; int outDir = mkdir(outputDirectory.c_str(),0777); if(outDir == 0 || errno == EEXIST) { outputImage.save_jpeg((directoryPath+"/output/"+outputFilename+extension).c_str()); dTime2 = gpu::getTime(tim); fileIOTime += dTime2 - dTime1; std::cout << "Time for Disk Write: " << dTime2 - dTime1 << std::endl; } else { if(errno != EEXIST) { std::cout << "Error creating output directory" << std::endl; } } delete[] h_data; delete[] h_result; } } std::cout << "File I/O time: " << fileIOTime << std::endl; std::cout << 
"Configuration time: " << configurationTime << std::endl; std::cout << "Execution time: " << executionTime << std::endl; std::cout << "GPU Utilization: " << (double)executionTime/(fileIOTime+configurationTime+executionTime) << std::endl; } else { std::cout << "Usage: " << argv[0] << " -filter [optional] <image-directory>" << std::endl; std::cout << "ERROR: " << options.errorMessage << std::endl; } std::cout << "---------------------ENDING IMAGE APP ON GPU--------------------------------------" << std::endl; }
58dbcb11c0b4e94a30afef264cc08380810dc4a1.cu
#include <iostream> #include <sys/types.h> #include <sys/stat.h> #include <vector> #include <string> #include <cstdlib> #include "CImg.h" #include "gpuProcessImage.cu" #include "Utils.h" #include "cpuProcessImage.h" #include "Constants.h" int main(int argc, char* argv[]) { std::cout << "-------------------RUNNING IMAGE FILTERS APP ON GPU---------------------------" << std::endl; gpu::Options options; bool validArguments = parseCommandLine(argc, argv, &options); if(validArguments) { std::vector<std::string> fileList; std::string directoryPath = options.directoryPath; gpu::readDirectory(directoryPath,&fileList); std::vector<std::string>::iterator it = fileList.begin(); int count = 0; int* warmupBuffer =(int*) calloc(1000, sizeof(int)); timeval tim; double dTime1 = gpu::getTime(tim); std::cout << "Sending warm up signal to GPU." << std::endl; sendWarmUpSignal(warmupBuffer,1000*sizeof(int)); double dTime2 = gpu::getTime(tim); double warmupTime = dTime2 - dTime1; std::cout << "time taken for performing warm up: " << warmupTime << std::endl; delete[] warmupBuffer; std::cout << "Starting File I/O using CPU." << std::endl; double fileIOTime = 0.0; double configurationTime = 0.0; double executionTime = 0.0; for(;it != fileList.end();++it) { std::string imageFilename = directoryPath+*it; std::string extension = imageFilename.substr(imageFilename.length()-4,4); if(extension == JPG_EXTENSION || extension == JPEG_EXTENSION) { std::string filename = imageFilename.substr(0,imageFilename.length()-4); int indexOfSlash = imageFilename.find_last_of("/"); std::cout << "filename: " << filename << std::endl; std::cout << "Reading Image from Disk." << std::endl; dTime1 = gpu::getTime(tim); std::string outputFilename = imageFilename.substr(indexOfSlash, imageFilename.length()-4-indexOfSlash); CImg<unsigned char> image(imageFilename.c_str()); dTime2 = gpu::getTime(tim); fileIOTime += dTime2 - dTime1; std::cout << "Time taken to read from disk: " << dTime2 - dTime1 << std::endl; dTime1 = gpu::getTime(tim); std::cout << "Unrolling Image and setting up blocks and threads." << std::endl; int width = image.width(); int height = image.height(); int channels = image.spectrum(); gpu::Image imgInfo(image.width(),image.height(),image.width()*image.height(),image.spectrum()); /* <summary> 1. Allocate Buffers 2. Get Meta information from the image and assign that to ImageInfo object. 3. Copy image into Input Buffer (unroll operation). 4. Perform the operation. */ unsigned char* h_data = new unsigned char[imgInfo.spectrum*imgInfo.size]; unsigned char* h_result = new unsigned char[imgInfo.spectrum*imgInfo.size]; gpu::unrollMatrix(image,imgInfo.width,imgInfo.height,imgInfo.spectrum, h_data); int problemSize, sizeData, sizeResult; gpu::Setup setup; startSetup(width, height, channels,&problemSize, &sizeData, &sizeResult, &setup); dTime2 = gpu::getTime(tim); configurationTime += dTime2 - dTime1; std::cout << "Blocks: " << setup.blocks << ", Threads: " << setup.threads << std::endl; std::cout << "Done configuring the problem.\nTime taken: " << dTime2 - dTime1 << std::endl; dTime1 = gpu::getTime(tim); std::cout << "Starting memory allocation and data transfer from Host to Device." 
<< std::endl; unsigned char *d_data; cudaMalloc((void**)&d_data,sizeData); cudaMemcpy(d_data,h_data,sizeData,cudaMemcpyHostToDevice); unsigned char* d_result; cudaMalloc((void**)&d_result,sizeData); dTime2 = gpu::getTime(tim); std::cout << "Done transferring data.\nTime taken: " << dTime2 - dTime1 <<std::endl; dTime1 = gpu::getTime(tim); std::cout << "Begining execution on GPU." << std::endl; int offset = width*height; std::cout << "Applying Filter..." << std::endl; int* d_kernel; std::cout << "filter size: " << options.kernelSize << std::endl; if(options.isConvolutionOp) { cudaMalloc((void**)&d_kernel,options.kernelSize*options.kernelSize*sizeof(int)); cudaMemcpy(d_kernel,options.convolutionKernel,options.kernelSize*options.kernelSize*sizeof(int),cudaMemcpyHostToDevice); } switch(options.filterFlag) { case gpu::BRIGHTNESS: runBrightnessKernel(setup,d_data,d_result, width,height,channels,offset); break; case gpu::CONTRAST: runContrastKernel(setup,d_data,d_result, width,height,channels,offset); break; case gpu::CONVOLUTION: runConvolutionKernel(setup,d_data,d_result, d_kernel,options.kernelSize, width,height,channels,offset); break; case gpu::BLEND: runBlendKernel(setup,d_data,d_data,d_result, width,height,channels,offset,1.2f,options.blendMode); break; case gpu::SATURATION: runSaturationKernel(setup,d_data,d_result, width,height,channels,offset); break; case gpu::SEPIA: runSepiaKernel(setup,d_data,d_result, width,height,channels,offset); break; case gpu::BLACKWHITE: std::cout << "black n white kernel----------" <<std::endl; runBWKernel(setup,d_data,d_result, width,height,channels,offset); break; case gpu::BRIGHTNESS_CONTRAST: runBrightnessKernel(setup,d_data,d_result, width,height,channels,offset); runContrastKernel(setup,d_result,d_result, width,height,channels,offset); break; case gpu::BLACKWHITE_BRIGHTNESS: runBWKernel(setup,d_data,d_result, width,height,channels,offset); runContrastKernel(setup,d_result,d_result, width,height,channels,offset); break; case gpu::BRIGHTNESS_SATURATION: runBrightnessKernel(setup,d_data,d_result, width,height,channels,offset); runSaturationKernel(setup,d_result,d_result, width,height,channels,offset); break; case gpu::CONTRAST_SEPIA: runContrastKernel(setup,d_data,d_result, width,height,channels,offset); runSepiaKernel(setup,d_result,d_result, width,height,channels,offset); break; } dTime2 = gpu::getTime(tim); executionTime += dTime2 - dTime1; std::cout << "Done with execution on GPU.\nTime Taken: " << dTime2 - dTime1 << std::endl; dTime1 = dTime2; cudaMemcpy(h_result,d_result,sizeResult,cudaMemcpyDeviceToHost); cudaFree(d_data); cudaFree(d_result); if(options.isConvolutionOp) { cudaFree(d_kernel); } dTime2 = gpu::getTime(tim); configurationTime += dTime2 - dTime1; std::cout << "Data transferred back to Host.\nTime taken: " << dTime2 - dTime1 << std::endl; CImg<unsigned char> outputImage(h_result,imgInfo.width,imgInfo.height,1, imgInfo.spectrum,0); dTime1 = dTime2; std::cout << "Writing to Disk"<<std::endl; std::string outputDirectory = directoryPath+"output/"; int outDir = mkdir(outputDirectory.c_str(),0777); if(outDir == 0 || errno == EEXIST) { outputImage.save_jpeg((directoryPath+"/output/"+outputFilename+extension).c_str()); dTime2 = gpu::getTime(tim); fileIOTime += dTime2 - dTime1; std::cout << "Time for Disk Write: " << dTime2 - dTime1 << std::endl; } else { if(errno != EEXIST) { std::cout << "Error creating output directory" << std::endl; } } delete[] h_data; delete[] h_result; } } std::cout << "File I/O time: " << fileIOTime << std::endl; std::cout 
<< "Configuration time: " << configurationTime << std::endl; std::cout << "Execution time: " << executionTime << std::endl; std::cout << "GPU Utilization: " << (double)executionTime/(fileIOTime+configurationTime+executionTime) << std::endl; } else { std::cout << "Usage: " << argv[0] << " -filter [optional] <image-directory>" << std::endl; std::cout << "ERROR: " << options.errorMessage << std::endl; } std::cout << "---------------------ENDING IMAGE APP ON GPU--------------------------------------" << std::endl; }
ce2e7c5b6a227c43f40939e4182c4112dfe92820.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "add.hip"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int N = XSIZE*YSIZE;
            // Allocate N doubles per buffer (the original call omitted sizeof(double)).
            double *a = NULL;
            hipMalloc(&a, XSIZE*YSIZE*sizeof(double));
            double *b = NULL;
            hipMalloc(&b, XSIZE*YSIZE*sizeof(double));
            double *c = NULL;
            hipMalloc(&c, XSIZE*YSIZE*sizeof(double));
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL((add), dim3(gridBlock), dim3(threadBlock), 0, 0, N, a, b, c);
            hipDeviceSynchronize();
            // Warm-up launches before timing.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL((add), dim3(gridBlock), dim3(threadBlock), 0, 0, N, a, b, c);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL((add), dim3(gridBlock), dim3(threadBlock), 0, 0, N, a, b, c);
            }
            hipDeviceSynchronize();  // wait for the timed launches to finish before reading the clock
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            // Release the buffers before the next configuration to avoid accumulating allocations.
            hipFree(a);
            hipFree(b);
            hipFree(c);
        }
    }
}
ce2e7c5b6a227c43f40939e4182c4112dfe92820.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "add.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int N = XSIZE*YSIZE;
            // Allocate N doubles per buffer (the original call omitted sizeof(double)).
            double *a = NULL;
            cudaMalloc(&a, XSIZE*YSIZE*sizeof(double));
            double *b = NULL;
            cudaMalloc(&b, XSIZE*YSIZE*sizeof(double));
            double *c = NULL;
            cudaMalloc(&c, XSIZE*YSIZE*sizeof(double));
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            add<<<gridBlock,threadBlock>>>(N,a,b,c);
            cudaDeviceSynchronize();
            // Warm-up launches before timing.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                add<<<gridBlock,threadBlock>>>(N,a,b,c);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                add<<<gridBlock,threadBlock>>>(N,a,b,c);
            }
            cudaDeviceSynchronize();  // wait for the timed launches to finish before reading the clock
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            // Release the buffers before the next configuration to avoid accumulating allocations.
            cudaFree(a);
            cudaFree(b);
            cudaFree(c);
        }
    }
}
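The two harness files above benchmark a kernel pulled in from "add.hip" / "add.cu", which is not part of this dump. Below is only a hedged sketch of what an element-wise kernel with the observed call signature add<<<gridBlock, threadBlock>>>(N, a, b, c) could look like; the actual kernel may differ.

__global__ void add(int N, double *a, double *b, double *c)
{
    // The harness launches a 2D grid sized from the padded matrix dimensions,
    // so flatten the 2D thread coordinates into one linear element index.
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int i = y * gridDim.x * blockDim.x + x;
    if (i < N)
    {
        c[i] = a[i] + b[i];   // element-wise sum
    }
}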
71bf3032a2fda2a72379fe0e7987ac16d16e4d3e.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include "hip/hip_runtime.h"
#include "functions.c"

// modular product: returns (a*b) mod p without overflowing 32 bits
__device__ unsigned int aaa(unsigned int a, unsigned int b, unsigned int p) {
  unsigned int za = a;
  unsigned int ab = 0;
  while (b > 0) {
    if (b%2 == 1) ab = (ab + za) % p;
    za = (2 * za) % p;
    b /= 2;
  }
  return ab;
}

// modular exponentiation: returns (a^b) mod p by repeated squaring
__device__ unsigned int bbb(unsigned int a, unsigned int b, unsigned int p) {
  unsigned int z = a;
  unsigned int aExpb = 1;
  while (b > 0) {
    if (b%2 == 1) aExpb = aaa(aExpb, z, p);
    z = aaa(z, z, p);
    b /= 2;
  }
  return aExpb;
}

// brute-force search: thread with global index id tests candidate key id+1
__global__ void ccc(unsigned int *p, unsigned int *g, unsigned int *h, unsigned int *x){
  int threadid = threadIdx.x; //thread number
  int blockid = blockIdx.x;   //block number
  int Nblock = blockDim.x;    //number of threads in a block
  int id = threadid + blockid*Nblock;
  if (id < *p-1){
    if (bbb(*g,id+1,*p)==*h) {
      printf("Secret key found! x = %u \n", id+1);
      *x = id + 1;
    }
  }
}

int main (int argc, char **argv)
{
  /* Part 2. Start this program by first copying the contents of the main
     function from your completed decrypt.c main function. */
  unsigned int n, p, g, h, x;
  unsigned int Nints;

  printf("Enter the secret key (0 if unknown): ");
  fflush(stdout);
  char stat = scanf("%u",&x);

  printf("Reading file.\n");
  FILE *mess; mess = fopen("message.txt","r");
  FILE *key;  key  = fopen("public_key.txt","r");
  fscanf(key,"%u%u%u%u", &n, &p, &g, &h);
  fscanf(mess, "%u\n", &Nints);

  unsigned int *Zmessage = (unsigned int *) malloc(Nints*sizeof(unsigned int));
  unsigned int *a = (unsigned int *) malloc(Nints*sizeof(unsigned int));
  for (unsigned int i=0;i<Nints;i++) {
    fscanf(mess,"%u %u\n", &Zmessage[i], &a[i]);
  }
  fclose(mess);
  fclose(key);

  if (x==0 || modExp(g,x,p)!=h) {
    printf("Finding the secret key...\n");
    double startTime = clock();

    unsigned int *pp, *gg, *hh, *xx;
    hipMalloc(&pp, sizeof(unsigned int));
    hipMalloc(&gg, sizeof(unsigned int));
    hipMalloc(&hh, sizeof(unsigned int));
    hipMalloc(&xx, sizeof(unsigned int));
    hipMemcpy(pp, &p, sizeof(unsigned int), hipMemcpyHostToDevice);
    hipMemcpy(gg, &g, sizeof(unsigned int), hipMemcpyHostToDevice);
    hipMemcpy(hh, &h, sizeof(unsigned int), hipMemcpyHostToDevice);

    hipLaunchKernelGGL((ccc), dim3((p+1022)/1024), dim3(1024), 0, 0, pp, gg, hh, xx);
    hipMemcpy(&x, xx, sizeof(unsigned int), hipMemcpyDeviceToHost);

    double endTime = clock();
    double totalTime = (endTime-startTime)/CLOCKS_PER_SEC;
    double work = (double) p;
    double throughput = work/totalTime;
    printf("Searching all keys took %g seconds, throughput was %g values tested per second.\n", totalTime, throughput);
  }

  int bufferSize = 1024;
  unsigned char *message = (unsigned char *) malloc(bufferSize*sizeof(unsigned char));
  ElGamalDecrypt(Zmessage,a,Nints,p,x);
  unsigned int cpi = (n-1)/8;
  convertZToString(Zmessage, Nints, message, Nints*cpi);

  /* Q4 Make the search for the secret key parallel on the GPU using CUDA. */
  return 0;
}
71bf3032a2fda2a72379fe0e7987ac16d16e4d3e.cu
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include "cuda.h"
#include "functions.c"

// modular product: returns (a*b) mod p without overflowing 32 bits
__device__ unsigned int aaa(unsigned int a, unsigned int b, unsigned int p) {
  unsigned int za = a;
  unsigned int ab = 0;
  while (b > 0) {
    if (b%2 == 1) ab = (ab + za) % p;
    za = (2 * za) % p;
    b /= 2;
  }
  return ab;
}

// modular exponentiation: returns (a^b) mod p by repeated squaring
__device__ unsigned int bbb(unsigned int a, unsigned int b, unsigned int p) {
  unsigned int z = a;
  unsigned int aExpb = 1;
  while (b > 0) {
    if (b%2 == 1) aExpb = aaa(aExpb, z, p);
    z = aaa(z, z, p);
    b /= 2;
  }
  return aExpb;
}

// brute-force search: thread with global index id tests candidate key id+1
__global__ void ccc(unsigned int *p, unsigned int *g, unsigned int *h, unsigned int *x){
  int threadid = threadIdx.x; //thread number
  int blockid = blockIdx.x;   //block number
  int Nblock = blockDim.x;    //number of threads in a block
  int id = threadid + blockid*Nblock;
  if (id < *p-1){
    if (bbb(*g,id+1,*p)==*h) {
      printf("Secret key found! x = %u \n", id+1);
      *x = id + 1;
    }
  }
}

int main (int argc, char **argv)
{
  /* Part 2. Start this program by first copying the contents of the main
     function from your completed decrypt.c main function. */
  unsigned int n, p, g, h, x;
  unsigned int Nints;

  printf("Enter the secret key (0 if unknown): ");
  fflush(stdout);
  char stat = scanf("%u",&x);

  printf("Reading file.\n");
  FILE *mess; mess = fopen("message.txt","r");
  FILE *key;  key  = fopen("public_key.txt","r");
  fscanf(key,"%u%u%u%u", &n, &p, &g, &h);
  fscanf(mess, "%u\n", &Nints);

  unsigned int *Zmessage = (unsigned int *) malloc(Nints*sizeof(unsigned int));
  unsigned int *a = (unsigned int *) malloc(Nints*sizeof(unsigned int));
  for (unsigned int i=0;i<Nints;i++) {
    fscanf(mess,"%u %u\n", &Zmessage[i], &a[i]);
  }
  fclose(mess);
  fclose(key);

  if (x==0 || modExp(g,x,p)!=h) {
    printf("Finding the secret key...\n");
    double startTime = clock();

    unsigned int *pp, *gg, *hh, *xx;
    cudaMalloc(&pp, sizeof(unsigned int));
    cudaMalloc(&gg, sizeof(unsigned int));
    cudaMalloc(&hh, sizeof(unsigned int));
    cudaMalloc(&xx, sizeof(unsigned int));
    cudaMemcpy(pp, &p, sizeof(unsigned int), cudaMemcpyHostToDevice);
    cudaMemcpy(gg, &g, sizeof(unsigned int), cudaMemcpyHostToDevice);
    cudaMemcpy(hh, &h, sizeof(unsigned int), cudaMemcpyHostToDevice);

    ccc<<<(p+1022)/1024, 1024>>>(pp, gg, hh, xx);
    cudaMemcpy(&x, xx, sizeof(unsigned int), cudaMemcpyDeviceToHost);

    double endTime = clock();
    double totalTime = (endTime-startTime)/CLOCKS_PER_SEC;
    double work = (double) p;
    double throughput = work/totalTime;
    printf("Searching all keys took %g seconds, throughput was %g values tested per second.\n", totalTime, throughput);
  }

  int bufferSize = 1024;
  unsigned char *message = (unsigned char *) malloc(bufferSize*sizeof(unsigned char));
  ElGamalDecrypt(Zmessage,a,Nints,p,x);
  unsigned int cpi = (n-1)/8;
  convertZToString(Zmessage, Nints, message, Nints*cpi);

  /* Q4 Make the search for the secret key parallel on the GPU using CUDA. */
  return 0;
}
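Both versions of this brute-force key search include "functions.c", which supplies modExp, ElGamalDecrypt and convertZToString but is not part of this dump. As a point of reference only, here is a sketch of a host-side modExp that mirrors the device helpers aaa/bbb above (overflow-safe multiply-and-square); modProd is a name invented for this sketch, and the real functions.c may be implemented differently.

// hypothetical host-side counterpart of the device helper aaa: (a*b) mod p
unsigned int modProd(unsigned int a, unsigned int b, unsigned int p)
{
  unsigned int za = a, ab = 0;
  while (b > 0) {
    if (b % 2 == 1) ab = (ab + za) % p;   // add in the current power-of-two multiple
    za = (2 * za) % p;
    b /= 2;
  }
  return ab;
}

// hypothetical host-side modExp matching the call modExp(g, x, p): (a^b) mod p
unsigned int modExp(unsigned int a, unsigned int b, unsigned int p)
{
  unsigned int z = a, aExpb = 1;
  while (b > 0) {
    if (b % 2 == 1) aExpb = modProd(aExpb, z, p);  // multiply in the current square
    z = modProd(z, z, p);
    b /= 2;
  }
  return aExpb;
}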
0a25cbc767cd9ab40d909c86108054612dfd2548.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @file CUDAJDSUtils.cpp * * @license * Copyright (c) 2009-2013 * Fraunhofer Institute for Algorithms and Scientific Computing SCAI * for Fraunhofer-Gesellschaft * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * @endlicense * * @brief Implementation of JDS utilities with CUDA * @author Bea Hornef, Thomas Brandes * @date 04.07.2012 * @since 1.0.0 */ // hpp #include <lama/cuda/utils.cu.h> // others #include <lama/cuda/CUDAStreamSyncToken.hpp> #include <lama/cuda/CUDAError.hpp> #include <lama/cuda/CUDATexture.hpp> #include <lama/cuda/CUDAJDSUtils.hpp> #include <lama/cuda/CUDAUtils.hpp> #include <lama/exception/LAMAAssert.hpp> #include <lama/tracing.hpp> #include <lama/LAMAInterface.hpp> #include <lama/LAMAInterfaceRegistry.hpp> // macros #include <lama/macros/unused.hpp> // thrust #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/fill.h> //#include <thrust/gather.h> #include <thrust/iterator/reverse_iterator.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/reduce.h> #include <thrust/scan.h> #include <thrust/scatter.h> #include <thrust/sort.h> #include <thrust/transform_reduce.h> #include <thrust/tuple.h> namespace lama { LAMA_LOG_DEF_LOGGER( CUDAJDSUtils::logger, "CUDA.JDSUtils" ) /* ------------------------------------------------------------------------------------------------------------------ */ /* thrust functors */ /* ------------------------------------------------------------------------------------------------------------------ */ template<typename T> struct identity { const T x; identity( T _x ) : x( _x ) { } __host__ __device__ T operator()( thrust::tuple<T,T> y ) { if ( thrust::get < 0 > ( y ) == x ) { return thrust::get < 1 > ( y ); } return 0; } }; template<typename T> struct greaterThan { const T x; greaterThan( T _x ) : x( _x ) { } __host__ __device__ T operator()( T y ) { return y > x; } }; /* ------------------------------------------------------------------------------------------------------------------ */ /* setDiagonalWithScalar */ /* ------------------------------------------------------------------------------------------------------------------ */ template<typename ValueType> void CUDAJDSUtils::setDiagonalWithScalar( const IndexType numDiagonal, ValueType values[], Scalar scalar ) { LAMA_LOG_INFO( logger, "setDiagonalWithScalar with numDiagonal = " << numDiagonal << " and scalar = " << scalar ) LAMA_CHECK_CUDA_ACCESS ValueType value = 
scalar.getValue<ValueType>(); thrust::device_ptr<ValueType> valuesPtr( const_cast<ValueType*>( values ) ); thrust::fill( valuesPtr, valuesPtr + numDiagonal, value ); } /* ------------------------------------------------------------------------------------------------------------------ */ /* getRow */ /* ------------------------------------------------------------------------------------------------------------------ */ template<typename ValueType,typename OtherValueType> __global__ void getRowKernel( OtherValueType *row, const IndexType i, const IndexType *ilg, const IndexType *dlg, const IndexType *ja, const ValueType *values ) { IndexType offset = 0; for ( IndexType j = 0; j < ilg[i]; j++ ) { row[ja[i + offset]] = static_cast<OtherValueType>( values[i + offset] ); offset += dlg[j]; } } template<typename ValueType,typename OtherValueType> void CUDAJDSUtils::getRow( OtherValueType row[], const IndexType i, const IndexType numColumns, const IndexType numRows, const IndexType perm[], const IndexType ilg[], const IndexType dlg[], const IndexType ja[], const ValueType values[] ) { LAMA_LOG_INFO( logger, "getRow with i = " << i << ", numColumns = " << numColumns << " and numRows = " << numRows ) LAMA_CHECK_CUDA_ACCESS thrust::device_ptr<OtherValueType> rowPtr( const_cast<OtherValueType*>( row ) ); thrust::device_ptr<IndexType> permPtr( const_cast<IndexType*>( perm ) ); thrust::fill( rowPtr, rowPtr + numColumns, static_cast<OtherValueType>( 0 ) ); thrust::counting_iterator<IndexType> sequence( 0 ); // correct index with permutation array IndexType ii = thrust::transform_reduce( thrust::make_zip_iterator( thrust::make_tuple( permPtr, sequence ) ), thrust::make_zip_iterator( thrust::make_tuple( permPtr + numRows, sequence + numRows ) ), identity<IndexType>( i ), 0, thrust::plus<IndexType>() ); const int block_size = 256; dim3 dimBlock( block_size, 1, 1 ); dim3 dimGrid = makeGrid( 1, dimBlock.x ); //TODO: find better CUDA / Thrust implementation hipLaunchKernelGGL(( getRowKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, row, ii, ilg, dlg, ja, values ); hipStreamSynchronize( 0 ); LAMA_CHECK_CUDA_ERROR } /* ------------------------------------------------------------------------------------------------------------------ */ /* getValue */ /* ------------------------------------------------------------------------------------------------------------------ */ template<typename ValueType> __global__ void getValueKernel( const IndexType i, const IndexType j, const IndexType numRows, const IndexType* dlg, const IndexType* ilg, const IndexType* perm, const IndexType* ja, const ValueType* values, ValueType* result ) { const int tId = threadId( gridDim, blockIdx, blockDim, threadIdx ); if ( tId == 0 ) { IndexType ii; // check the permutation of row i for ( ii = 0; ii < numRows; ii++ ) { if ( perm[ii] == i ) { break; } } IndexType k = 0; bool found = false; for ( IndexType jj = 0; jj < ilg[ii]; jj++ ) { if ( ja[ii + k] == j ) { result[0] = values[ii + k]; found = true; break; } k += dlg[jj]; } if ( !found ) { result[0] = 0.0; } } } template<typename ValueType,typename NoType> ValueType CUDAJDSUtils::getValue( const IndexType i, const IndexType j, const IndexType numRows, const IndexType* dlg, const IndexType* ilg, const IndexType* perm, const IndexType* ja, const ValueType* values ) { LAMA_CHECK_CUDA_ACCESS thrust::device_ptr<ValueType> resultPtr = thrust::device_malloc < ValueType > ( 1 ); ValueType *resultRawPtr = thrust::raw_pointer_cast( resultPtr ); const int block_size = 256; dim3 dimBlock( block_size, 1, 1 
); dim3 dimGrid = makeGrid( 1, dimBlock.x ); //TODO: find better CUDA / Thrust implementation hipLaunchKernelGGL(( getValueKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, i, j, numRows, dlg, ilg, perm, ja, values, resultRawPtr ); thrust::host_vector<ValueType> resultHost( resultPtr, resultPtr + 1 ); return resultHost[0]; } /* ------------------------------------------------------------------------------------------------------------------ */ /* scaleValue */ /* ------------------------------------------------------------------------------------------------------------------ */ template<typename ValueType,typename OtherValueType> __global__ void scaleValueKernel( const IndexType numRows, const IndexType *perm, const IndexType *ilg, const IndexType *dlg, ValueType *mValues, const OtherValueType *values ) { const int i = threadId( gridDim, blockIdx, blockDim, threadIdx ); if ( i < numRows ) { IndexType offset = i; OtherValueType value = values[perm[i]]; for ( IndexType j = 0; j < ilg[i]; j++ ) { mValues[offset] *= static_cast<ValueType>( value ); offset += dlg[j]; } } } template<typename ValueType,typename OtherValueType> void CUDAJDSUtils::scaleValue( const IndexType numRows, const IndexType perm[], const IndexType ilg[], const IndexType dlg[], ValueType mValues[], const OtherValueType values[] ) { LAMA_LOG_INFO( logger, "scaleValue with numRows = " << numRows ) LAMA_CHECK_CUDA_ACCESS const int block_size = 256; dim3 dimBlock( block_size, 1, 1 ); dim3 dimGrid = makeGrid( numRows, dimBlock.x ); hipLaunchKernelGGL(( scaleValueKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, numRows, perm, ilg, dlg, mValues, values ); hipStreamSynchronize( 0 ); LAMA_CHECK_CUDA_ERROR } /* ------------------------------------------------------------------------------------------------------------------ */ /* checkDiagonalProperty */ /* ------------------------------------------------------------------------------------------------------------------ */ __global__ void checkDiagonalPropertyKernel( const IndexType numRows, bool *result, const IndexType *perm, const IndexType *ja ) { const int i = threadId( gridDim, blockIdx, blockDim, threadIdx ); if ( i < numRows ) { result[i] = ( ja[i] == perm[i] ); } } bool CUDAJDSUtils::checkDiagonalProperty( const IndexType numDiagonals, const IndexType numRows, const IndexType numColumns, const IndexType perm[], const IndexType ja[], const IndexType dlg[] ) { LAMA_LOG_INFO( logger, "checkDiagonalProperty with numDiagonals = " << numDiagonals << ", numRows = " << numRows << " and numColumns = " << numColumns ) LAMA_CHECK_CUDA_ACCESS if ( numRows <= 0 ) { return false; } // now it is sure that dlg, perm and ja are not empty thrust::device_ptr<IndexType> dlgPtr( const_cast<IndexType*>( dlg ) ); thrust::host_vector<IndexType> firstDlg( dlgPtr, dlgPtr + 1 ); if ( firstDlg[0] < ::min( numDiagonals, numColumns ) ) { return false; } thrust::device_ptr<bool> resultPtr = thrust::device_malloc<bool>( numRows ); thrust::fill( resultPtr, resultPtr + numRows, false ); bool *resultRawPtr = thrust::raw_pointer_cast( resultPtr ); const int block_size = 256; dim3 dimBlock( block_size, 1, 1 ); dim3 dimGrid = makeGrid( numRows, dimBlock.x ); hipLaunchKernelGGL(( checkDiagonalPropertyKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, numRows, resultRawPtr, perm, ja ); hipStreamSynchronize( 0 ); LAMA_CHECK_CUDA_ERROR return thrust::reduce( resultPtr, resultPtr + numRows, true, thrust::logical_and<bool>() ); } /* 
------------------------------------------------------------------------------------------------------------------ */ /* check */ /* ------------------------------------------------------------------------------------------------------------------ */ __global__ void checkDescendingKernel( const IndexType n, const IndexType *array, bool *result ) { const int i = threadId( gridDim, blockIdx, blockDim, threadIdx ); if ( i == 0 ) { if ( n > 1 ) { for ( IndexType i = 1; i < n; i++ ) { if ( array[i] > array[i - 1] ) { result[0] = false; } } } } } bool CUDAJDSUtils::check( const IndexType numRows, const IndexType numValues, const IndexType numColumns, const IndexType ja[], const IndexType ilg[], const IndexType dlg[] ) { LAMA_LOG_INFO( logger, "check with numValues = " << numValues << ", numColumns = " << numColumns ) LAMA_CHECK_CUDA_ACCESS if ( numRows > 0 ) { thrust::device_ptr<IndexType> jaPtr( const_cast<IndexType*>( ja ) ); bool error = false; error = thrust::transform_reduce( jaPtr, jaPtr + numValues, greaterThan<IndexType>( numColumns ), 0, thrust::logical_or<bool>() ); if ( error ) { return false; } thrust::device_ptr<bool> resultPtr = thrust::device_malloc<bool>( 1 ); thrust::fill( resultPtr, resultPtr + 1, true ); bool *resultRawPtr = thrust::raw_pointer_cast( resultPtr ); thrust::device_ptr<IndexType> ilgPtr( const_cast<IndexType*>( ilg ) ); thrust::host_vector<IndexType> ilgHost( ilgPtr, ilgPtr + 1 ); const int block_size = 256; dim3 dimBlock( block_size, 1, 1 ); dim3 dimGrid = makeGrid( 1, dimBlock.x ); { hipLaunchKernelGGL(( checkDescendingKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, numRows, ilg, resultRawPtr ); hipStreamSynchronize( 0 ); LAMA_CHECK_CUDA_ERROR thrust::host_vector<IndexType> result( resultPtr, resultPtr + 1 ); if ( !result[0] ) { return false; } } { hipLaunchKernelGGL(( checkDescendingKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, ilgHost[0], dlg, resultRawPtr ); hipStreamSynchronize( 0 ); LAMA_CHECK_CUDA_ERROR thrust::host_vector<IndexType> result( resultPtr, resultPtr + 1 ); if ( !result[0] ) { return false; } } IndexType dlgSum = CUDAUtils::sum( dlg, ilgHost[0] ); IndexType ilgSum = CUDAUtils::sum( ilg, numRows ); if ( dlgSum != ilgSum ) { return false; } } return true; } /* ------------------------------------------------------------------------------------------------------------------ */ /* ilg2dlg */ /* ------------------------------------------------------------------------------------------------------------------ */ __global__ void ilg2dlgKernel( IndexType *dlg, const IndexType numDiagonals, const IndexType *ilg, const IndexType numRows ) { const int i = threadId( gridDim, blockIdx, blockDim, threadIdx ); if ( i < numDiagonals ) { for ( IndexType j = 0; j < numRows; j++ ) { if ( ilg[j] > i ) { dlg[i]++; } } } } IndexType CUDAJDSUtils::ilg2dlg( IndexType dlg[], const IndexType numDiagonals, const IndexType ilg[], const IndexType numRows ) { LAMA_LOG_INFO( logger, "ilg2dlg with numDiagonals = " << numDiagonals << ", numRows = " << numRows ) LAMA_CHECK_CUDA_ACCESS if ( numDiagonals == 0 ) { return 0; } // create device pointers and ilg sum thrust::device_ptr<IndexType> dlgPtr( const_cast<IndexType*>( dlg ) ); thrust::device_ptr<IndexType> ilgPtr( const_cast<IndexType*>( ilg ) ); thrust::fill( dlgPtr, dlgPtr + numDiagonals, 0 ); IndexType sumIlg = thrust::reduce( ilgPtr, ilgPtr + numRows, 0, thrust::plus<IndexType>() ); const int block_size = 256; dim3 dimBlock( block_size, 1, 1 ); dim3 dimGrid = makeGrid( numDiagonals, dimBlock.x ); hipLaunchKernelGGL(( 
ilg2dlgKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, dlg, numDiagonals, ilg, numRows ); LAMA_CHECK_CUDA_ERROR return sumIlg; } /* ------------------------------------------------------------------------------------------------------------------ */ /* sortRows */ /* ------------------------------------------------------------------------------------------------------------------ */ void CUDAJDSUtils::sortRows( IndexType array[], IndexType perm[], const IndexType n ) { LAMA_LOG_INFO( logger, "sort " << n << " rows by sizes" ) // Note: this solution does not work on Tesla cards (doesent it?) LAMA_CHECK_CUDA_ACCESS thrust::device_ptr<IndexType> array_d( const_cast<IndexType*>( array ) ); thrust::device_ptr<IndexType> perm_d( const_cast<IndexType*>( perm ) ); thrust::stable_sort_by_key( array_d, array_d + n, perm_d, thrust::greater<IndexType>() ); LAMA_CHECK_CUDA_ERROR } /* ------------------------------------------------------------------------------------------------------------------ */ /* setCSRValues */ /* ------------------------------------------------------------------------------------------------------------------ */ template<typename T1,typename T2> __global__ void csr2jdsKernel( int* jdsJa, T1* jdsValues, const int* const jdsDlg, const int* const jdsIlg, const int* const jdsPerm, const int nrows, const int* const csrIa, const int* const csrJa, const T2* const csrValues ) { const int index = threadId( gridDim, blockIdx, blockDim, threadIdx ); if ( index < nrows ) { int i = jdsPerm[index]; int offset = index; for ( int jdsJJ = 0, csrJJ = csrIa[i]; jdsJJ < jdsIlg[index]; jdsJJ++, csrJJ++ ) { jdsJa[offset] = csrJa[csrJJ]; jdsValues[offset] = csrValues[csrJJ]; offset += jdsDlg[jdsJJ]; // there is next value for row } } } template<typename JDSValueType,typename CSRValueType> void CUDAJDSUtils::setCSRValues( IndexType jdsJA[], JDSValueType jdsValues[], const IndexType numRows, const IndexType jdsPerm[], const IndexType jdsILG[], const IndexType jdsDLG[], const IndexType csrIA[], const IndexType csrJA[], const CSRValueType csrValues[] ) { // convert CSR data to JDS, ja and values LAMA_REGION( "CUDA.JDS<-CSR_values" ) LAMA_LOG_INFO( logger, "convert CSR to JDS, #rows = " << numRows ) LAMA_CHECK_CUDA_ACCESS const int block_size = 256; dim3 dimBlock( block_size, 1, 1 ); dim3 dimGrid = makeGrid( numRows, dimBlock.x ); hipLaunchKernelGGL(( csr2jdsKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, jdsJA, jdsValues, jdsDLG, jdsILG, jdsPerm, numRows, csrIA, csrJA, csrValues ); hipStreamSynchronize( 0 ); LAMA_CHECK_CUDA_ERROR } /* ------------------------------------------------------------------------------------------------------------------ */ /* setInversePerm */ /* ------------------------------------------------------------------------------------------------------------------ */ void CUDAJDSUtils::setInversePerm( IndexType inversePerm[], const IndexType perm[], const IndexType n ) { LAMA_LOG_INFO( logger, "compute inverse perm, n = " << n ) LAMA_CHECK_CUDA_ACCESS if ( n > 0 ) { thrust::device_ptr<IndexType> inversePermPtr( const_cast<IndexType*>( inversePerm ) ); thrust::device_ptr<IndexType> permPtr( const_cast<IndexType*>( perm ) ); thrust::counting_iterator<IndexType> sequence( 0 ); thrust::scatter( sequence, sequence + n, permPtr, inversePermPtr ); LAMA_CHECK_CUDA_ERROR } } /* ------------------------------------------------------------------------------------------------------------------ */ /* getCSRValues */ /* 
------------------------------------------------------------------------------------------------------------------ */ template<typename JDSValueType,typename CSRValueType> __global__ void jds2csrKernel( IndexType *csrJA, CSRValueType *csrValues, const IndexType *csrIA, const IndexType numRows, const IndexType *jdsInversePerm, const IndexType *jdsILG, const IndexType *jdsDLG, const IndexType *jdsJA, const JDSValueType *jdsValues ) { const int i = threadId( gridDim, blockIdx, blockDim, threadIdx ); if ( i < numRows ) { IndexType ii = jdsInversePerm[i]; // where to find row i in JDS storage const IndexType numValuesInRow = jdsILG[ii]; IndexType jdsOffset = ii; // run through input JDS data IndexType offset = csrIA[i]; // run through output data for ( IndexType jj = 0; jj < numValuesInRow; jj++ ) { csrJA[offset + jj] = jdsJA[jdsOffset]; csrValues[offset + jj] = static_cast<CSRValueType>( jdsValues[jdsOffset] ); jdsOffset += jdsDLG[jj]; } } } template<typename JDSValueType,typename CSRValueType> void CUDAJDSUtils::getCSRValues( IndexType csrJA[], CSRValueType csrValues[], const IndexType csrIA[], const IndexType numRows, const IndexType jdsInversePerm[], const IndexType jdsILG[], const IndexType jdsDLG[], const IndexType jdsJA[], const JDSValueType jdsValues[] ) { LAMA_REGION( "CUDA.JDS->CSR_values" ) LAMA_LOG_INFO( logger, "get CSRValues<" << typeid( JDSValueType ).name() << ", " << typeid( CSRValueType ).name() << ">" << ", #rows = " << numRows ) LAMA_CHECK_CUDA_ACCESS const int block_size = 256; dim3 dimBlock( block_size, 1, 1 ); dim3 dimGrid = makeGrid( numRows, dimBlock.x ); hipStreamSynchronize( 0 ); LAMA_CHECK_CUDA_ERROR hipLaunchKernelGGL(( jds2csrKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, csrJA, csrValues, csrIA, numRows, jdsInversePerm, jdsILG, jdsDLG, jdsJA, jdsValues ); hipStreamSynchronize( 0 ); LAMA_CHECK_CUDA_ERROR } /* ------------------------------------------------------------------------------------------------------------------ */ /* xxxxx */ /* ------------------------------------------------------------------------------------------------------------------ */ /* --------------------------------------------------------------------------- */ /* Jacobi */ /* --------------------------------------------------------------------------- */ texture<float,1> texJDSSXref; texture<int2,1> texJDSDXref; texture<int,1> texJDSdlgRef; /* --------------------------------------------------------------------------- */ template<typename T,bool useTexture> __inline__ __device__ T fetch_JDSx( const T* const x, const int i ) { return x[i]; } template<bool useTexture,bool useSharedMemory> __inline__ __device__ int fetch_JDSdlg( const int* const dlg_d, int[], const int i ) { return dlg_d[i]; } template<> __inline__ __device__ float fetch_JDSx<float,true>( const float* const, const int i ) { return tex1Dfetch( texJDSSXref, i ); } template<> __inline__ __device__ double fetch_JDSx<double,true>( const double* const, const int i ) { int2 v = tex1Dfetch( texJDSDXref, i ); return __hiloint2double( v.y, v.x ); } template<> __inline__ __device__ int fetch_JDSdlg<true,false>( const int* const, int[], const int i ) { return tex1Dfetch( texJDSdlgRef, i ); } template<> __inline__ __device__ int fetch_JDSdlg<true,true>( const int* const, int dlg_sm[], const int i ) { return dlg_sm[i]; } template<> __inline__ __device__ int fetch_JDSdlg<false,true>( const int* const, int dlg_sm[], const int i ) { return dlg_sm[i]; } template<typename T,bool useTexture,bool useSharedMem> __global__ void jds_jacobi_kernel( 
const T* const jdsValues, const int* const jdsDlg, const int ndlg, const int* const jdsIlg, const int* const jdsJA, const int* const jdsPerm, const int numRows, const T* const rhs, T* const solution, const T* const oldSolution, const T omega ) { extern __shared__ int dlg[]; const int i = threadId( gridDim, blockIdx, blockDim, threadIdx ); if ( useSharedMem ) { int k = threadIdx.x; while ( k < ndlg ) { dlg[k] = jdsDlg[k]; k += blockDim.x; } __syncthreads(); } if ( i < numRows ) { const int perm = jdsPerm[i]; T temp = rhs[perm]; const T aDiag = jdsValues[i]; int pos = i + fetch_JDSdlg<useTexture,useSharedMem>( jdsDlg, dlg, 0 ); const int rowEnd = jdsIlg[i]; for ( int jj = 1; jj < rowEnd; ++jj ) { temp -= jdsValues[pos] * fetch_JDSx<T,useTexture>( oldSolution, jdsJA[pos] ); pos += fetch_JDSdlg<useTexture,useSharedMem>( jdsDlg, dlg, jj ); } if ( omega == 0.5 ) { solution[perm] = omega * ( fetch_JDSx<T,useTexture>( oldSolution, perm ) + temp / aDiag ); } else if ( omega == 1.0 ) { solution[perm] = temp / aDiag; } else { solution[perm] = omega * ( temp / aDiag ) + ( 1.0 - omega ) * fetch_JDSx<T,useTexture>( oldSolution, perm ); } } } template<typename ValueType> void CUDAJDSUtils::jacobi( ValueType solution[], const IndexType numRows, const IndexType jdsPerm[], const IndexType jdsIlg[], const IndexType ndlg, const IndexType jdsDlg[], const IndexType jdsJA[], const ValueType jdsValues[], const ValueType oldSolution[], const ValueType rhs[], const ValueType omega, SyncToken* syncToken ) { LAMA_REGION( "CUDA.JDS.jacobi" ) hipStream_t stream = 0; LAMA_LOG_INFO( logger, "jacobi<" << typeid(ValueType).name() << ">" << ", #rows = " << numRows << ", omega = " << omega ) LAMA_CHECK_CUDA_ACCESS if ( syncToken ) { CUDAStreamSyncToken* cudaStreamSyncToken = dynamic_cast<CUDAStreamSyncToken*>( syncToken ); LAMA_ASSERT_DEBUG( cudaStreamSyncToken, "no cuda stream sync token provided" ) stream = cudaStreamSyncToken->getCUDAStream(); } const int block_size = ( numRows > 8191 ? 
256 : 128 ); dim3 dimBlock( block_size, 1, 1 ); dim3 dimGrid = makeGrid( numRows, dimBlock.x ); bool useTexture = CUDATexture::useTexture(); useTexture = false; // not yet tested if ( syncToken ) { // asycnronous operation not supported with textures ( free must be done dynamically ) useTexture = false; } const bool useSharedMem = false; // maybe optimize later LAMA_LOG_DEBUG( logger, "useTexture = " << useTexture << ", useSharedMem = " << useSharedMem ) if ( useTexture ) { if ( sizeof(ValueType) == sizeof(double) ) { LAMA_CUDA_RT_CALL( hipBindTexture( NULL, texJDSDXref, oldSolution ), "LAMA_STATUS_CUDA_BINDTEX_FAILED" ) } else if ( sizeof(ValueType) == sizeof(float) ) { LAMA_CUDA_RT_CALL( hipBindTexture( NULL, texJDSSXref, oldSolution ), "LAMA_STATUS_CUDA_BINDTEX_FAILED" ); } if ( !useSharedMem ) { LAMA_CUDA_RT_CALL( hipBindTexture( NULL, texJDSdlgRef, jdsDlg ), "LAMA_STATUS_CUDA_BINDTEX_FAILED" ); LAMA_CUDA_RT_CALL( hipFuncSetCacheConfig( jds_jacobi_kernel<ValueType, true, false>, hipFuncCachePreferL1 ), "LAMA_STATUS_CUDA_FUNCSETCACHECONFIG_FAILED" ); } else { LAMA_CUDA_RT_CALL( hipFuncSetCacheConfig( jds_jacobi_kernel<ValueType, true, true>,hipFuncCachePreferL1 ), "LAMA_STATUS_CUDA_FUNCSETCACHECONFIG_FAILED" ); } } else { if ( !useSharedMem ) { LAMA_CUDA_RT_CALL( hipFuncSetCacheConfig( jds_jacobi_kernel<ValueType, false, false>,hipFuncCachePreferL1 ), "LAMA_STATUS_CUDA_FUNCSETCACHECONFIG_FAILED" ); } else { LAMA_CUDA_RT_CALL( hipFuncSetCacheConfig( jds_jacobi_kernel<ValueType, false, true>, hipFuncCachePreferL1 ), "LAMA_STATUS_CUDA_FUNCSETCACHECONFIG_FAILED" ); } } if ( useTexture ) { if ( !useSharedMem ) { hipLaunchKernelGGL(( jds_jacobi_kernel<ValueType, true, false>) , dim3(dimGrid), dim3(dimBlock), 0, stream, jdsValues, jdsDlg, ndlg, jdsIlg, jdsJA, jdsPerm, numRows, rhs, solution, oldSolution, omega ); } else { const int sharedMemSize = ndlg * sizeof(int); hipLaunchKernelGGL(( jds_jacobi_kernel<ValueType, true, true>) , dim3(dimGrid), dim3(dimBlock), sharedMemSize, stream, jdsValues, jdsDlg, ndlg, jdsIlg, jdsJA, jdsPerm, numRows, rhs, solution, oldSolution, omega ); } } else { if ( !useSharedMem ) { hipLaunchKernelGGL(( jds_jacobi_kernel<ValueType, false, false>) , dim3(dimGrid), dim3(dimBlock), 0, stream, jdsValues, jdsDlg, ndlg, jdsIlg, jdsJA, jdsPerm, numRows, rhs, solution, oldSolution, omega ); } else { const int sharedMemSize = ndlg * sizeof(int); hipLaunchKernelGGL(( jds_jacobi_kernel<ValueType, false, true>) , dim3(dimGrid), dim3(dimBlock), sharedMemSize, stream, jdsValues, jdsDlg, ndlg, jdsIlg, jdsJA, jdsPerm, numRows, rhs, solution, oldSolution, omega); } } LAMA_CUDA_RT_CALL( hipGetLastError(), "LAMA_STATUS_SJDSJACOBI_CUDAKERNEL_FAILED" ); if ( useTexture ) { if ( sizeof(ValueType) == sizeof(double) ) { LAMA_CUDA_RT_CALL( hipUnbindTexture( texJDSDXref ), "LAMA_STATUS_CUDA_UNBINDTEX_FAILED" ); } else if ( sizeof(ValueType) == sizeof(float) ) { LAMA_CUDA_RT_CALL( hipUnbindTexture( texJDSSXref ), "LAMA_STATUS_CUDA_UNBINDTEX_FAILED" ); } if ( !useSharedMem ) { LAMA_CUDA_RT_CALL( hipUnbindTexture( texJDSdlgRef ), "LAMA_STATUS_CUDA_UNBINDTEX_FAILED" ); } } if ( !syncToken ) { hipStreamSynchronize( stream ); } } /* --------------------------------------------------------------------------- */ /* Jacobi halo */ /* --------------------------------------------------------------------------- */ template<typename T,bool useTexture,bool useSharedMem> __global__ void jds_jacobi_halo_kernel( const T* const diagonal, const T* const jdsValuesHalo, const int* const jdsDlgHalo, const int 
ndlg_halo, const int* const jdsIlgHalo, const int* const jdsJAHalo, const int* const jdsPermHalo, T* const solutionLocal, const T* const oldSolutionHalo, const T omega ) { extern __shared__ int dlg[]; const int id = threadId( gridDim, blockIdx, blockDim, threadIdx ); if ( useSharedMem ) { int k = threadIdx.x; while ( k < ndlg_halo ) { dlg[k] = jdsDlgHalo[k]; k += blockDim.x; } __syncthreads(); } if ( id < fetch_JDSdlg<useTexture,useSharedMem>( jdsDlgHalo, dlg, 0 ) ) { T temp = 0.0; int pos = id; const int rowEnd = jdsIlgHalo[id]; const int perm = jdsPermHalo[id]; for ( int jj = 0; jj < rowEnd; ++jj ) { temp += jdsValuesHalo[pos] * fetch_JDSx<T,useTexture>( oldSolutionHalo, jdsJAHalo[pos] ); pos += fetch_JDSdlg<useTexture,useSharedMem>( jdsDlgHalo, dlg, jj ); } const T aDiag = diagonal[perm]; solutionLocal[perm] -= temp * omega / aDiag; } } template<typename ValueType> void CUDAJDSUtils::jacobiHalo( ValueType solutionLocal[], const IndexType numRows, const ValueType diagonal[], const IndexType ndlg_halo, const IndexType jdsPermHalo[], const IndexType jdsIlgHalo[], const IndexType jdsDlgHalo[], const IndexType jdsJAHalo[], const ValueType jdsValuesHalo[], const ValueType oldSolutionHalo[], const ValueType omega, SyncToken* UNUSED(syncToken) ) { LAMA_REGION( "CUDA.JDS.jacobiHalo" ) LAMA_LOG_INFO( logger, "jacobiHalo<" << typeid(ValueType).name() << ">" << ", #rows = " << numRows << ", omega = " << omega ) LAMA_CHECK_CUDA_ACCESS const int block_size = ( numRows > 8191 ? 256 : 128 ) / 2; dim3 dimBlock( block_size, 1, 1 ); dim3 dimGrid = makeGrid( numRows, dimBlock.x ); // TODO:numRows is too much... bool useTexture = CUDATexture::useTexture(); useTexture = false; // not yet tested const bool useSharedMem = false; // maybe optimize later LAMA_LOG_DEBUG( logger, "useTexture = " << useTexture << ", useSharedMem = " << useSharedMem ) if ( useTexture ) { if ( sizeof(ValueType) == sizeof(double) ) { LAMA_CUDA_RT_CALL( hipBindTexture( NULL, texJDSDXref, oldSolutionHalo ), "LAMA_STATUS_CUDA_BINDTEX_FAILED" ) } else if ( sizeof(ValueType) == sizeof(float) ) { LAMA_CUDA_RT_CALL( hipBindTexture( NULL, texJDSSXref, oldSolutionHalo ), "LAMA_STATUS_CUDA_BINDTEX_FAILED" ); } if ( !useSharedMem ) { LAMA_CUDA_RT_CALL( hipBindTexture( NULL, texJDSdlgRef, jdsDlgHalo ), "LAMA_STATUS_CUDA_BINDTEX_FAILED" ); LAMA_CUDA_RT_CALL( hipFuncSetCacheConfig( jds_jacobi_halo_kernel<ValueType, true, false>, hipFuncCachePreferL1), "LAMA_STATUS_CUDA_FUNCSETCACHECONFIG_FAILED" ); } else { LAMA_CUDA_RT_CALL( hipFuncSetCacheConfig( jds_jacobi_halo_kernel<ValueType, true, true>,hipFuncCachePreferL1 ), "LAMA_STATUS_CUDA_FUNCSETCACHECONFIG_FAILED" ); } } else { if ( !useSharedMem ) { LAMA_CUDA_RT_CALL( hipFuncSetCacheConfig( jds_jacobi_halo_kernel<ValueType, false, false>, hipFuncCachePreferL1), "LAMA_STATUS_CUDA_FUNCSETCACHECONFIG_FAILED" ); } else { LAMA_CUDA_RT_CALL( hipFuncSetCacheConfig( jds_jacobi_halo_kernel<ValueType, false, true>,hipFuncCachePreferL1 ), "LAMA_STATUS_CUDA_FUNCSETCACHECONFIG_FAILED" ); } } if ( useTexture ) { if ( !useSharedMem ) { hipLaunchKernelGGL(( jds_jacobi_halo_kernel<ValueType, true, false>) , dim3(dimGrid),dim3(dimBlock),0, 0, diagonal, jdsValuesHalo, jdsDlgHalo, ndlg_halo, jdsIlgHalo, jdsJAHalo, jdsPermHalo, solutionLocal, oldSolutionHalo, omega); } else { const int sharedMemSize = ndlg_halo * sizeof(int); hipLaunchKernelGGL(( jds_jacobi_halo_kernel<ValueType, true, true>) , dim3(dimGrid),dim3(dimBlock),sharedMemSize, 0, diagonal, jdsValuesHalo, jdsDlgHalo, ndlg_halo, jdsIlgHalo, jdsJAHalo, 
jdsPermHalo, solutionLocal, oldSolutionHalo, omega); } } else { if ( !useSharedMem ) { hipLaunchKernelGGL(( jds_jacobi_halo_kernel<ValueType, false, false>) , dim3(dimGrid),dim3(dimBlock), 0, 0, diagonal, jdsValuesHalo, jdsDlgHalo, ndlg_halo, jdsIlgHalo, jdsJAHalo, jdsPermHalo, solutionLocal, oldSolutionHalo, omega); } else { const int sharedMemSize = ndlg_halo * sizeof(int); hipLaunchKernelGGL(( jds_jacobi_halo_kernel<ValueType, false, true>) , dim3(dimGrid),dim3(dimBlock),sharedMemSize, 0, diagonal, jdsValuesHalo, jdsDlgHalo, ndlg_halo, jdsIlgHalo, jdsJAHalo, jdsPermHalo, solutionLocal, oldSolutionHalo, omega); } } LAMA_CUDA_RT_CALL( hipGetLastError(), "LAMA_STATUS_CSRJACOBIHALO_CUDAKERNEL_FAILED" ); LAMA_CUDA_RT_CALL( hipStreamSynchronize(0), "LAMA_STATUS_CSRJACOBIHALO_CUDAKERNEL_FAILED" ); if ( useTexture ) { if ( sizeof(ValueType) == sizeof(double) ) { LAMA_CUDA_RT_CALL( hipUnbindTexture( texJDSDXref ), "LAMA_STATUS_CUDA_UNBINDTEX_FAILED" ); } else if ( sizeof(ValueType) == sizeof(float) ) { LAMA_CUDA_RT_CALL( hipUnbindTexture( texJDSSXref ), "LAMA_STATUS_CUDA_UNBINDTEX_FAILED" ); } if ( !useSharedMem ) { LAMA_CUDA_RT_CALL( hipUnbindTexture( texJDSdlgRef ), "LAMA_STATUS_CUDA_UNBINDTEX_FAILED" ); } } } /* --------------------------------------------------------------------------- */ template<typename ValueType,bool useTexture,bool useSharedMem> __global__ void jdsgemvKernel( IndexType n, const ValueType alpha, const ValueType* const jdsValues, const IndexType* const jdsDlg, const IndexType ndlg, const IndexType* const jdsIlg, const IndexType* jdsJA, const IndexType* jdsPerm, const ValueType* x_d, const ValueType beta, const ValueType* y_d, ValueType* const result_d ) { extern __shared__ IndexType dlg[]; const IndexType i = threadId( gridDim, blockIdx, blockDim, threadIdx ); if ( useSharedMem ) { int k = threadIdx.x; while ( k < ndlg ) { dlg[k] = jdsDlg[k]; k += blockDim.x; } __syncthreads(); } if ( i < n ) { IndexType perm = jdsPerm[i]; ValueType summand = 0.0; if ( beta != 0.0 ) { summand = beta * y_d[perm]; } ValueType value = 0.0; int k = i; for ( int jj = 0; jj < jdsIlg[i]; ++jj ) { IndexType j = jdsJA[k]; value += jdsValues[k] * fetch_JDSx<ValueType,useTexture>( x_d, j ); k += fetch_JDSdlg<useTexture,useSharedMem>( jdsDlg, dlg, jj ); } // for ( int jj = 0; jj < ndlg; ++jj ) // { // const int incr = fetch_JDSdlg<useTexture,useSharedMem>( jdsDlg, dlg, jj ); // if ( i < incr ) // { // IndexType j = jdsJA[k]; // value += jdsValues[k] * fetch_JDSx<ValueType,useTexture>( x_d, j ); // k += incr; // } // else // { // break; // } // } result_d[perm] = alpha * value + summand; } } /* --------------------------------------------------------------------------- */ template<typename ValueType> void CUDAJDSUtils::normalGEMV( ValueType result[], const ValueType alpha, const ValueType x[], const ValueType beta, const ValueType y[], const IndexType numRows, const IndexType jdsPerm[], const IndexType jdsILG[], const IndexType ndlg, const IndexType jdsDLG[], const IndexType jdsJA[], const ValueType jdsValues[], SyncToken* /* syncToken */) { LAMA_REGION( "CUDA.JDS.normalGEMV" ) LAMA_LOG_INFO( logger, "normalGEMV<" << typeid(ValueType).name() << ">" << ", #rows = " << numRows ) LAMA_LOG_INFO( logger, "alpha = " << alpha << ", x = " << x << ", beta = " << beta << ", y = " << y << ", result = " << result ) const bool useTexture = false; // still problems: CUDATexture::useTexture(); const bool useSharedMem = false; // maybe optimize later LAMA_LOG_DEBUG( logger, "useTexture = " << useTexture << ", 
useSharedMem = " << useSharedMem ) const int block_size = ( numRows > 8191 ? 256 : 128 ); dim3 dimBlock( block_size, 1, 1 ); dim3 dimGrid = makeGrid( numRows, dimBlock.x ); LAMA_CHECK_CUDA_ACCESS if ( useTexture ) { if ( sizeof(ValueType) == sizeof(double) ) { LAMA_CUDA_RT_CALL( hipBindTexture( NULL, texJDSDXref, x ), "LAMA_STATUS_CUDA_BINDTEX_FAILED" ); } else if ( sizeof(ValueType) == sizeof(float) ) { LAMA_CUDA_RT_CALL( hipBindTexture( NULL, texJDSSXref, x ), "LAMA_STATUS_CUDA_BINDTEX_FAILED" ); } if ( useSharedMem ) { const int sharedMemSize = ndlg * sizeof(int); hipFuncSetCacheConfig( jdsgemvKernel<ValueType,true,true>, hipFuncCachePreferL1 ); hipLaunchKernelGGL(( jdsgemvKernel<ValueType, true, true>), dim3(dimGrid),dim3(dimBlock),sharedMemSize, 0, numRows, alpha, jdsValues, jdsDLG, ndlg, jdsILG, jdsJA, jdsPerm, x, beta, y, result); } else // no sharedMem { LAMA_CUDA_RT_CALL( hipBindTexture( NULL, texJDSdlgRef, jdsDLG ), "LAMA_STATUS_CUDA_BINDTEX_FAILED" ); hipFuncSetCacheConfig( jdsgemvKernel<ValueType,true,false>, hipFuncCachePreferL1 ); hipLaunchKernelGGL(( jdsgemvKernel<ValueType, true, false>), dim3(dimGrid),dim3(dimBlock), 0, 0, numRows, alpha, jdsValues, jdsDLG, ndlg, jdsILG, jdsJA, jdsPerm, x, beta, y, result); } // skip the following in case of asynchronous execution LAMA_CUDA_RT_CALL( hipStreamSynchronize(0), "JDS: gemvKernel FAILED" ) if ( useSharedMem ) { LAMA_CUDA_RT_CALL( hipUnbindTexture( texJDSdlgRef ), "LAMA_STATUS_CUDA_UNBINDTEX_FAILED" ); } if ( sizeof(ValueType) == sizeof(double) ) { LAMA_CUDA_RT_CALL( hipUnbindTexture( texJDSDXref ), "LAMA_STATUS_CUDA_UNBINDTEX_FAILED" ); } else if ( sizeof(ValueType) == sizeof(float) ) { LAMA_CUDA_RT_CALL( hipUnbindTexture( texJDSSXref ), "LAMA_STATUS_CUDA_UNBINDTEX_FAILED" ); } } else // no Texture cache { if ( useSharedMem ) { const int sharedMemSize = ndlg * sizeof(int); hipFuncSetCacheConfig( jdsgemvKernel<ValueType,false,true>, hipFuncCachePreferL1 ); hipLaunchKernelGGL(( jdsgemvKernel<ValueType, false, true>), dim3(dimGrid),dim3(dimBlock),sharedMemSize, 0, numRows, alpha, jdsValues, jdsDLG, ndlg, jdsILG, jdsJA, jdsPerm, x, beta, y, result); } else // no sharedMem { hipFuncSetCacheConfig( jdsgemvKernel<ValueType,false,false>, hipFuncCachePreferL1 ); hipLaunchKernelGGL(( jdsgemvKernel<ValueType, false, false>), dim3(dimGrid),dim3(dimBlock), 0, 0, numRows, alpha, jdsValues, jdsDLG, ndlg, jdsILG, jdsJA, jdsPerm, x, beta, y, result); } } LAMA_CHECK_CUDA_ERROR hipStreamSynchronize( 0 ); } /* --------------------------------------------------------------------------- */ void CUDAJDSUtils::setInterface( JDSUtilsInterface& JDSUtils ) { LAMA_LOG_INFO( logger, "set JDS routines for CUDA in Interface" ) LAMA_INTERFACE_REGISTER( JDSUtils, sortRows ) LAMA_INTERFACE_REGISTER( JDSUtils, checkDiagonalProperty ) LAMA_INTERFACE_REGISTER( JDSUtils, check ) LAMA_INTERFACE_REGISTER( JDSUtils, ilg2dlg ) LAMA_INTERFACE_REGISTER( JDSUtils, setInversePerm ) LAMA_INTERFACE_REGISTER_T( JDSUtils, setDiagonalWithScalar, float ) LAMA_INTERFACE_REGISTER_T( JDSUtils, setDiagonalWithScalar, double ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, scaleValue, float, float ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, scaleValue, float, double ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, scaleValue, double, float ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, scaleValue, double, double ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, getRow, float, float ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, getRow, float, double ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, getRow, double, float ) 
LAMA_INTERFACE_REGISTER_TT( JDSUtils, getRow, double, double ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, getValue, float, float ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, getValue, float, double ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, getValue, double, float ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, getValue, double, double ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, getCSRValues, float, float ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, getCSRValues, float, double ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, getCSRValues, double, float ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, getCSRValues, double, double ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, setCSRValues, float, float ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, setCSRValues, float, double ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, setCSRValues, double, float ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, setCSRValues, double, double ) LAMA_INTERFACE_REGISTER_T( JDSUtils, jacobi, float ) LAMA_INTERFACE_REGISTER_T( JDSUtils, jacobi, double ) LAMA_INTERFACE_REGISTER_T( JDSUtils, normalGEMV, float ) LAMA_INTERFACE_REGISTER_T( JDSUtils, normalGEMV, double ) LAMA_INTERFACE_REGISTER_T( JDSUtils, jacobiHalo, float ) LAMA_INTERFACE_REGISTER_T( JDSUtils, jacobiHalo, double ) } /* --------------------------------------------------------------------------- */ /* Static registration of the Utils routines */ /* --------------------------------------------------------------------------- */ bool CUDAJDSUtils::registerInterface() { LAMAInterface& interface = LAMAInterfaceRegistry::getRegistry().modifyInterface( Context::CUDA ); setInterface( interface.JDSUtils ); return true; } /* --------------------------------------------------------------------------- */ /* Static initialization at program start */ /* --------------------------------------------------------------------------- */ bool CUDAJDSUtils::initialized = registerInterface(); } // namespace lama
0a25cbc767cd9ab40d909c86108054612dfd2548.cu
/** * @file CUDAJDSUtils.cpp * * @license * Copyright (c) 2009-2013 * Fraunhofer Institute for Algorithms and Scientific Computing SCAI * for Fraunhofer-Gesellschaft * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * @endlicense * * @brief Implementation of JDS utilities with CUDA * @author Bea Hornef, Thomas Brandes * @date 04.07.2012 * @since 1.0.0 */ // hpp #include <lama/cuda/utils.cu.h> // others #include <lama/cuda/CUDAStreamSyncToken.hpp> #include <lama/cuda/CUDAError.hpp> #include <lama/cuda/CUDATexture.hpp> #include <lama/cuda/CUDAJDSUtils.hpp> #include <lama/cuda/CUDAUtils.hpp> #include <lama/exception/LAMAAssert.hpp> #include <lama/tracing.hpp> #include <lama/LAMAInterface.hpp> #include <lama/LAMAInterfaceRegistry.hpp> // macros #include <lama/macros/unused.hpp> // thrust #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/fill.h> //#include <thrust/gather.h> #include <thrust/iterator/reverse_iterator.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/reduce.h> #include <thrust/scan.h> #include <thrust/scatter.h> #include <thrust/sort.h> #include <thrust/transform_reduce.h> #include <thrust/tuple.h> namespace lama { LAMA_LOG_DEF_LOGGER( CUDAJDSUtils::logger, "CUDA.JDSUtils" ) /* ------------------------------------------------------------------------------------------------------------------ */ /* thrust functors */ /* ------------------------------------------------------------------------------------------------------------------ */ template<typename T> struct identity { const T x; identity( T _x ) : x( _x ) { } __host__ __device__ T operator()( thrust::tuple<T,T> y ) { if ( thrust::get < 0 > ( y ) == x ) { return thrust::get < 1 > ( y ); } return 0; } }; template<typename T> struct greaterThan { const T x; greaterThan( T _x ) : x( _x ) { } __host__ __device__ T operator()( T y ) { return y > x; } }; /* ------------------------------------------------------------------------------------------------------------------ */ /* setDiagonalWithScalar */ /* ------------------------------------------------------------------------------------------------------------------ */ template<typename ValueType> void CUDAJDSUtils::setDiagonalWithScalar( const IndexType numDiagonal, ValueType values[], Scalar scalar ) { LAMA_LOG_INFO( logger, "setDiagonalWithScalar with numDiagonal = " << numDiagonal << " and scalar = " << scalar ) LAMA_CHECK_CUDA_ACCESS ValueType value = scalar.getValue<ValueType>(); thrust::device_ptr<ValueType> valuesPtr( const_cast<ValueType*>( 
values ) ); thrust::fill( valuesPtr, valuesPtr + numDiagonal, value ); } /* ------------------------------------------------------------------------------------------------------------------ */ /* getRow */ /* ------------------------------------------------------------------------------------------------------------------ */ template<typename ValueType,typename OtherValueType> __global__ void getRowKernel( OtherValueType *row, const IndexType i, const IndexType *ilg, const IndexType *dlg, const IndexType *ja, const ValueType *values ) { IndexType offset = 0; for ( IndexType j = 0; j < ilg[i]; j++ ) { row[ja[i + offset]] = static_cast<OtherValueType>( values[i + offset] ); offset += dlg[j]; } } template<typename ValueType,typename OtherValueType> void CUDAJDSUtils::getRow( OtherValueType row[], const IndexType i, const IndexType numColumns, const IndexType numRows, const IndexType perm[], const IndexType ilg[], const IndexType dlg[], const IndexType ja[], const ValueType values[] ) { LAMA_LOG_INFO( logger, "getRow with i = " << i << ", numColumns = " << numColumns << " and numRows = " << numRows ) LAMA_CHECK_CUDA_ACCESS thrust::device_ptr<OtherValueType> rowPtr( const_cast<OtherValueType*>( row ) ); thrust::device_ptr<IndexType> permPtr( const_cast<IndexType*>( perm ) ); thrust::fill( rowPtr, rowPtr + numColumns, static_cast<OtherValueType>( 0 ) ); thrust::counting_iterator<IndexType> sequence( 0 ); // correct index with permutation array IndexType ii = thrust::transform_reduce( thrust::make_zip_iterator( thrust::make_tuple( permPtr, sequence ) ), thrust::make_zip_iterator( thrust::make_tuple( permPtr + numRows, sequence + numRows ) ), identity<IndexType>( i ), 0, thrust::plus<IndexType>() ); const int block_size = 256; dim3 dimBlock( block_size, 1, 1 ); dim3 dimGrid = makeGrid( 1, dimBlock.x ); //TODO: find better CUDA / Thrust implementation getRowKernel<<<dimGrid, dimBlock>>>( row, ii, ilg, dlg, ja, values ); cudaStreamSynchronize( 0 ); LAMA_CHECK_CUDA_ERROR } /* ------------------------------------------------------------------------------------------------------------------ */ /* getValue */ /* ------------------------------------------------------------------------------------------------------------------ */ template<typename ValueType> __global__ void getValueKernel( const IndexType i, const IndexType j, const IndexType numRows, const IndexType* dlg, const IndexType* ilg, const IndexType* perm, const IndexType* ja, const ValueType* values, ValueType* result ) { const int tId = threadId( gridDim, blockIdx, blockDim, threadIdx ); if ( tId == 0 ) { IndexType ii; // check the permutation of row i for ( ii = 0; ii < numRows; ii++ ) { if ( perm[ii] == i ) { break; } } IndexType k = 0; bool found = false; for ( IndexType jj = 0; jj < ilg[ii]; jj++ ) { if ( ja[ii + k] == j ) { result[0] = values[ii + k]; found = true; break; } k += dlg[jj]; } if ( !found ) { result[0] = 0.0; } } } template<typename ValueType,typename NoType> ValueType CUDAJDSUtils::getValue( const IndexType i, const IndexType j, const IndexType numRows, const IndexType* dlg, const IndexType* ilg, const IndexType* perm, const IndexType* ja, const ValueType* values ) { LAMA_CHECK_CUDA_ACCESS thrust::device_ptr<ValueType> resultPtr = thrust::device_malloc < ValueType > ( 1 ); ValueType *resultRawPtr = thrust::raw_pointer_cast( resultPtr ); const int block_size = 256; dim3 dimBlock( block_size, 1, 1 ); dim3 dimGrid = makeGrid( 1, dimBlock.x ); //TODO: find better CUDA / Thrust implementation getValueKernel<<<dimGrid, 
dimBlock>>>( i, j, numRows, dlg, ilg, perm, ja, values, resultRawPtr ); thrust::host_vector<ValueType> resultHost( resultPtr, resultPtr + 1 ); return resultHost[0]; } /* ------------------------------------------------------------------------------------------------------------------ */ /* scaleValue */ /* ------------------------------------------------------------------------------------------------------------------ */ template<typename ValueType,typename OtherValueType> __global__ void scaleValueKernel( const IndexType numRows, const IndexType *perm, const IndexType *ilg, const IndexType *dlg, ValueType *mValues, const OtherValueType *values ) { const int i = threadId( gridDim, blockIdx, blockDim, threadIdx ); if ( i < numRows ) { IndexType offset = i; OtherValueType value = values[perm[i]]; for ( IndexType j = 0; j < ilg[i]; j++ ) { mValues[offset] *= static_cast<ValueType>( value ); offset += dlg[j]; } } } template<typename ValueType,typename OtherValueType> void CUDAJDSUtils::scaleValue( const IndexType numRows, const IndexType perm[], const IndexType ilg[], const IndexType dlg[], ValueType mValues[], const OtherValueType values[] ) { LAMA_LOG_INFO( logger, "scaleValue with numRows = " << numRows ) LAMA_CHECK_CUDA_ACCESS const int block_size = 256; dim3 dimBlock( block_size, 1, 1 ); dim3 dimGrid = makeGrid( numRows, dimBlock.x ); scaleValueKernel<<<dimGrid, dimBlock>>>( numRows, perm, ilg, dlg, mValues, values ); cudaStreamSynchronize( 0 ); LAMA_CHECK_CUDA_ERROR } /* ------------------------------------------------------------------------------------------------------------------ */ /* checkDiagonalProperty */ /* ------------------------------------------------------------------------------------------------------------------ */ __global__ void checkDiagonalPropertyKernel( const IndexType numRows, bool *result, const IndexType *perm, const IndexType *ja ) { const int i = threadId( gridDim, blockIdx, blockDim, threadIdx ); if ( i < numRows ) { result[i] = ( ja[i] == perm[i] ); } } bool CUDAJDSUtils::checkDiagonalProperty( const IndexType numDiagonals, const IndexType numRows, const IndexType numColumns, const IndexType perm[], const IndexType ja[], const IndexType dlg[] ) { LAMA_LOG_INFO( logger, "checkDiagonalProperty with numDiagonals = " << numDiagonals << ", numRows = " << numRows << " and numColumns = " << numColumns ) LAMA_CHECK_CUDA_ACCESS if ( numRows <= 0 ) { return false; } // now it is sure that dlg, perm and ja are not empty thrust::device_ptr<IndexType> dlgPtr( const_cast<IndexType*>( dlg ) ); thrust::host_vector<IndexType> firstDlg( dlgPtr, dlgPtr + 1 ); if ( firstDlg[0] < std::min( numDiagonals, numColumns ) ) { return false; } thrust::device_ptr<bool> resultPtr = thrust::device_malloc<bool>( numRows ); thrust::fill( resultPtr, resultPtr + numRows, false ); bool *resultRawPtr = thrust::raw_pointer_cast( resultPtr ); const int block_size = 256; dim3 dimBlock( block_size, 1, 1 ); dim3 dimGrid = makeGrid( numRows, dimBlock.x ); checkDiagonalPropertyKernel<<<dimGrid, dimBlock>>>( numRows, resultRawPtr, perm, ja ); cudaStreamSynchronize( 0 ); LAMA_CHECK_CUDA_ERROR return thrust::reduce( resultPtr, resultPtr + numRows, true, thrust::logical_and<bool>() ); } /* ------------------------------------------------------------------------------------------------------------------ */ /* check */ /* ------------------------------------------------------------------------------------------------------------------ */ __global__ void checkDescendingKernel( const IndexType n, const 
IndexType *array, bool *result ) { const int i = threadId( gridDim, blockIdx, blockDim, threadIdx ); if ( i == 0 ) { if ( n > 1 ) { for ( IndexType i = 1; i < n; i++ ) { if ( array[i] > array[i - 1] ) { result[0] = false; } } } } } bool CUDAJDSUtils::check( const IndexType numRows, const IndexType numValues, const IndexType numColumns, const IndexType ja[], const IndexType ilg[], const IndexType dlg[] ) { LAMA_LOG_INFO( logger, "check with numValues = " << numValues << ", numColumns = " << numColumns ) LAMA_CHECK_CUDA_ACCESS if ( numRows > 0 ) { thrust::device_ptr<IndexType> jaPtr( const_cast<IndexType*>( ja ) ); bool error = false; error = thrust::transform_reduce( jaPtr, jaPtr + numValues, greaterThan<IndexType>( numColumns ), 0, thrust::logical_or<bool>() ); if ( error ) { return false; } thrust::device_ptr<bool> resultPtr = thrust::device_malloc<bool>( 1 ); thrust::fill( resultPtr, resultPtr + 1, true ); bool *resultRawPtr = thrust::raw_pointer_cast( resultPtr ); thrust::device_ptr<IndexType> ilgPtr( const_cast<IndexType*>( ilg ) ); thrust::host_vector<IndexType> ilgHost( ilgPtr, ilgPtr + 1 ); const int block_size = 256; dim3 dimBlock( block_size, 1, 1 ); dim3 dimGrid = makeGrid( 1, dimBlock.x ); { checkDescendingKernel<<<dimGrid, dimBlock>>>( numRows, ilg, resultRawPtr ); cudaStreamSynchronize( 0 ); LAMA_CHECK_CUDA_ERROR thrust::host_vector<IndexType> result( resultPtr, resultPtr + 1 ); if ( !result[0] ) { return false; } } { checkDescendingKernel<<<dimGrid, dimBlock>>>( ilgHost[0], dlg, resultRawPtr ); cudaStreamSynchronize( 0 ); LAMA_CHECK_CUDA_ERROR thrust::host_vector<IndexType> result( resultPtr, resultPtr + 1 ); if ( !result[0] ) { return false; } } IndexType dlgSum = CUDAUtils::sum( dlg, ilgHost[0] ); IndexType ilgSum = CUDAUtils::sum( ilg, numRows ); if ( dlgSum != ilgSum ) { return false; } } return true; } /* ------------------------------------------------------------------------------------------------------------------ */ /* ilg2dlg */ /* ------------------------------------------------------------------------------------------------------------------ */ __global__ void ilg2dlgKernel( IndexType *dlg, const IndexType numDiagonals, const IndexType *ilg, const IndexType numRows ) { const int i = threadId( gridDim, blockIdx, blockDim, threadIdx ); if ( i < numDiagonals ) { for ( IndexType j = 0; j < numRows; j++ ) { if ( ilg[j] > i ) { dlg[i]++; } } } } IndexType CUDAJDSUtils::ilg2dlg( IndexType dlg[], const IndexType numDiagonals, const IndexType ilg[], const IndexType numRows ) { LAMA_LOG_INFO( logger, "ilg2dlg with numDiagonals = " << numDiagonals << ", numRows = " << numRows ) LAMA_CHECK_CUDA_ACCESS if ( numDiagonals == 0 ) { return 0; } // create device pointers and ilg sum thrust::device_ptr<IndexType> dlgPtr( const_cast<IndexType*>( dlg ) ); thrust::device_ptr<IndexType> ilgPtr( const_cast<IndexType*>( ilg ) ); thrust::fill( dlgPtr, dlgPtr + numDiagonals, 0 ); IndexType sumIlg = thrust::reduce( ilgPtr, ilgPtr + numRows, 0, thrust::plus<IndexType>() ); const int block_size = 256; dim3 dimBlock( block_size, 1, 1 ); dim3 dimGrid = makeGrid( numDiagonals, dimBlock.x ); ilg2dlgKernel<<<dimGrid, dimBlock>>>( dlg, numDiagonals, ilg, numRows ); LAMA_CHECK_CUDA_ERROR return sumIlg; } /* ------------------------------------------------------------------------------------------------------------------ */ /* sortRows */ /* ------------------------------------------------------------------------------------------------------------------ */ void CUDAJDSUtils::sortRows( IndexType 
array[], IndexType perm[], const IndexType n ) { LAMA_LOG_INFO( logger, "sort " << n << " rows by sizes" ) // Note: this solution does not work on Tesla cards (doesent it?) LAMA_CHECK_CUDA_ACCESS thrust::device_ptr<IndexType> array_d( const_cast<IndexType*>( array ) ); thrust::device_ptr<IndexType> perm_d( const_cast<IndexType*>( perm ) ); thrust::stable_sort_by_key( array_d, array_d + n, perm_d, thrust::greater<IndexType>() ); LAMA_CHECK_CUDA_ERROR } /* ------------------------------------------------------------------------------------------------------------------ */ /* setCSRValues */ /* ------------------------------------------------------------------------------------------------------------------ */ template<typename T1,typename T2> __global__ void csr2jdsKernel( int* jdsJa, T1* jdsValues, const int* const jdsDlg, const int* const jdsIlg, const int* const jdsPerm, const int nrows, const int* const csrIa, const int* const csrJa, const T2* const csrValues ) { const int index = threadId( gridDim, blockIdx, blockDim, threadIdx ); if ( index < nrows ) { int i = jdsPerm[index]; int offset = index; for ( int jdsJJ = 0, csrJJ = csrIa[i]; jdsJJ < jdsIlg[index]; jdsJJ++, csrJJ++ ) { jdsJa[offset] = csrJa[csrJJ]; jdsValues[offset] = csrValues[csrJJ]; offset += jdsDlg[jdsJJ]; // there is next value for row } } } template<typename JDSValueType,typename CSRValueType> void CUDAJDSUtils::setCSRValues( IndexType jdsJA[], JDSValueType jdsValues[], const IndexType numRows, const IndexType jdsPerm[], const IndexType jdsILG[], const IndexType jdsDLG[], const IndexType csrIA[], const IndexType csrJA[], const CSRValueType csrValues[] ) { // convert CSR data to JDS, ja and values LAMA_REGION( "CUDA.JDS<-CSR_values" ) LAMA_LOG_INFO( logger, "convert CSR to JDS, #rows = " << numRows ) LAMA_CHECK_CUDA_ACCESS const int block_size = 256; dim3 dimBlock( block_size, 1, 1 ); dim3 dimGrid = makeGrid( numRows, dimBlock.x ); csr2jdsKernel<<<dimGrid,dimBlock>>>( jdsJA, jdsValues, jdsDLG, jdsILG, jdsPerm, numRows, csrIA, csrJA, csrValues ); cudaStreamSynchronize( 0 ); LAMA_CHECK_CUDA_ERROR } /* ------------------------------------------------------------------------------------------------------------------ */ /* setInversePerm */ /* ------------------------------------------------------------------------------------------------------------------ */ void CUDAJDSUtils::setInversePerm( IndexType inversePerm[], const IndexType perm[], const IndexType n ) { LAMA_LOG_INFO( logger, "compute inverse perm, n = " << n ) LAMA_CHECK_CUDA_ACCESS if ( n > 0 ) { thrust::device_ptr<IndexType> inversePermPtr( const_cast<IndexType*>( inversePerm ) ); thrust::device_ptr<IndexType> permPtr( const_cast<IndexType*>( perm ) ); thrust::counting_iterator<IndexType> sequence( 0 ); thrust::scatter( sequence, sequence + n, permPtr, inversePermPtr ); LAMA_CHECK_CUDA_ERROR } } /* ------------------------------------------------------------------------------------------------------------------ */ /* getCSRValues */ /* ------------------------------------------------------------------------------------------------------------------ */ template<typename JDSValueType,typename CSRValueType> __global__ void jds2csrKernel( IndexType *csrJA, CSRValueType *csrValues, const IndexType *csrIA, const IndexType numRows, const IndexType *jdsInversePerm, const IndexType *jdsILG, const IndexType *jdsDLG, const IndexType *jdsJA, const JDSValueType *jdsValues ) { const int i = threadId( gridDim, blockIdx, blockDim, threadIdx ); if ( i < numRows ) { IndexType ii = 
jdsInversePerm[i]; // where to find row i in JDS storage const IndexType numValuesInRow = jdsILG[ii]; IndexType jdsOffset = ii; // run through input JDS data IndexType offset = csrIA[i]; // run through output data for ( IndexType jj = 0; jj < numValuesInRow; jj++ ) { csrJA[offset + jj] = jdsJA[jdsOffset]; csrValues[offset + jj] = static_cast<CSRValueType>( jdsValues[jdsOffset] ); jdsOffset += jdsDLG[jj]; } } } template<typename JDSValueType,typename CSRValueType> void CUDAJDSUtils::getCSRValues( IndexType csrJA[], CSRValueType csrValues[], const IndexType csrIA[], const IndexType numRows, const IndexType jdsInversePerm[], const IndexType jdsILG[], const IndexType jdsDLG[], const IndexType jdsJA[], const JDSValueType jdsValues[] ) { LAMA_REGION( "CUDA.JDS->CSR_values" ) LAMA_LOG_INFO( logger, "get CSRValues<" << typeid( JDSValueType ).name() << ", " << typeid( CSRValueType ).name() << ">" << ", #rows = " << numRows ) LAMA_CHECK_CUDA_ACCESS const int block_size = 256; dim3 dimBlock( block_size, 1, 1 ); dim3 dimGrid = makeGrid( numRows, dimBlock.x ); cudaStreamSynchronize( 0 ); LAMA_CHECK_CUDA_ERROR jds2csrKernel<<<dimGrid,dimBlock>>>( csrJA, csrValues, csrIA, numRows, jdsInversePerm, jdsILG, jdsDLG, jdsJA, jdsValues ); cudaStreamSynchronize( 0 ); LAMA_CHECK_CUDA_ERROR } /* ------------------------------------------------------------------------------------------------------------------ */ /* xxxxx */ /* ------------------------------------------------------------------------------------------------------------------ */ /* --------------------------------------------------------------------------- */ /* Jacobi */ /* --------------------------------------------------------------------------- */ texture<float,1> texJDSSXref; texture<int2,1> texJDSDXref; texture<int,1> texJDSdlgRef; /* --------------------------------------------------------------------------- */ template<typename T,bool useTexture> __inline__ __device__ T fetch_JDSx( const T* const x, const int i ) { return x[i]; } template<bool useTexture,bool useSharedMemory> __inline__ __device__ int fetch_JDSdlg( const int* const dlg_d, int[], const int i ) { return dlg_d[i]; } template<> __inline__ __device__ float fetch_JDSx<float,true>( const float* const, const int i ) { return tex1Dfetch( texJDSSXref, i ); } template<> __inline__ __device__ double fetch_JDSx<double,true>( const double* const, const int i ) { int2 v = tex1Dfetch( texJDSDXref, i ); return __hiloint2double( v.y, v.x ); } template<> __inline__ __device__ int fetch_JDSdlg<true,false>( const int* const, int[], const int i ) { return tex1Dfetch( texJDSdlgRef, i ); } template<> __inline__ __device__ int fetch_JDSdlg<true,true>( const int* const, int dlg_sm[], const int i ) { return dlg_sm[i]; } template<> __inline__ __device__ int fetch_JDSdlg<false,true>( const int* const, int dlg_sm[], const int i ) { return dlg_sm[i]; } template<typename T,bool useTexture,bool useSharedMem> __global__ void jds_jacobi_kernel( const T* const jdsValues, const int* const jdsDlg, const int ndlg, const int* const jdsIlg, const int* const jdsJA, const int* const jdsPerm, const int numRows, const T* const rhs, T* const solution, const T* const oldSolution, const T omega ) { extern __shared__ int dlg[]; const int i = threadId( gridDim, blockIdx, blockDim, threadIdx ); if ( useSharedMem ) { int k = threadIdx.x; while ( k < ndlg ) { dlg[k] = jdsDlg[k]; k += blockDim.x; } __syncthreads(); } if ( i < numRows ) { const int perm = jdsPerm[i]; T temp = rhs[perm]; const T aDiag = jdsValues[i]; int pos = i + 
fetch_JDSdlg<useTexture,useSharedMem>( jdsDlg, dlg, 0 ); const int rowEnd = jdsIlg[i]; for ( int jj = 1; jj < rowEnd; ++jj ) { temp -= jdsValues[pos] * fetch_JDSx<T,useTexture>( oldSolution, jdsJA[pos] ); pos += fetch_JDSdlg<useTexture,useSharedMem>( jdsDlg, dlg, jj ); } if ( omega == 0.5 ) { solution[perm] = omega * ( fetch_JDSx<T,useTexture>( oldSolution, perm ) + temp / aDiag ); } else if ( omega == 1.0 ) { solution[perm] = temp / aDiag; } else { solution[perm] = omega * ( temp / aDiag ) + ( 1.0 - omega ) * fetch_JDSx<T,useTexture>( oldSolution, perm ); } } } template<typename ValueType> void CUDAJDSUtils::jacobi( ValueType solution[], const IndexType numRows, const IndexType jdsPerm[], const IndexType jdsIlg[], const IndexType ndlg, const IndexType jdsDlg[], const IndexType jdsJA[], const ValueType jdsValues[], const ValueType oldSolution[], const ValueType rhs[], const ValueType omega, SyncToken* syncToken ) { LAMA_REGION( "CUDA.JDS.jacobi" ) cudaStream_t stream = 0; LAMA_LOG_INFO( logger, "jacobi<" << typeid(ValueType).name() << ">" << ", #rows = " << numRows << ", omega = " << omega ) LAMA_CHECK_CUDA_ACCESS if ( syncToken ) { CUDAStreamSyncToken* cudaStreamSyncToken = dynamic_cast<CUDAStreamSyncToken*>( syncToken ); LAMA_ASSERT_DEBUG( cudaStreamSyncToken, "no cuda stream sync token provided" ) stream = cudaStreamSyncToken->getCUDAStream(); } const int block_size = ( numRows > 8191 ? 256 : 128 ); dim3 dimBlock( block_size, 1, 1 ); dim3 dimGrid = makeGrid( numRows, dimBlock.x ); bool useTexture = CUDATexture::useTexture(); useTexture = false; // not yet tested if ( syncToken ) { // asycnronous operation not supported with textures ( free must be done dynamically ) useTexture = false; } const bool useSharedMem = false; // maybe optimize later LAMA_LOG_DEBUG( logger, "useTexture = " << useTexture << ", useSharedMem = " << useSharedMem ) if ( useTexture ) { if ( sizeof(ValueType) == sizeof(double) ) { LAMA_CUDA_RT_CALL( cudaBindTexture( NULL, texJDSDXref, oldSolution ), "LAMA_STATUS_CUDA_BINDTEX_FAILED" ) } else if ( sizeof(ValueType) == sizeof(float) ) { LAMA_CUDA_RT_CALL( cudaBindTexture( NULL, texJDSSXref, oldSolution ), "LAMA_STATUS_CUDA_BINDTEX_FAILED" ); } if ( !useSharedMem ) { LAMA_CUDA_RT_CALL( cudaBindTexture( NULL, texJDSdlgRef, jdsDlg ), "LAMA_STATUS_CUDA_BINDTEX_FAILED" ); LAMA_CUDA_RT_CALL( cudaFuncSetCacheConfig( jds_jacobi_kernel<ValueType, true, false>, cudaFuncCachePreferL1 ), "LAMA_STATUS_CUDA_FUNCSETCACHECONFIG_FAILED" ); } else { LAMA_CUDA_RT_CALL( cudaFuncSetCacheConfig( jds_jacobi_kernel<ValueType, true, true>,cudaFuncCachePreferL1 ), "LAMA_STATUS_CUDA_FUNCSETCACHECONFIG_FAILED" ); } } else { if ( !useSharedMem ) { LAMA_CUDA_RT_CALL( cudaFuncSetCacheConfig( jds_jacobi_kernel<ValueType, false, false>,cudaFuncCachePreferL1 ), "LAMA_STATUS_CUDA_FUNCSETCACHECONFIG_FAILED" ); } else { LAMA_CUDA_RT_CALL( cudaFuncSetCacheConfig( jds_jacobi_kernel<ValueType, false, true>, cudaFuncCachePreferL1 ), "LAMA_STATUS_CUDA_FUNCSETCACHECONFIG_FAILED" ); } } if ( useTexture ) { if ( !useSharedMem ) { jds_jacobi_kernel<ValueType, true, false> <<<dimGrid, dimBlock, 0, stream>>>( jdsValues, jdsDlg, ndlg, jdsIlg, jdsJA, jdsPerm, numRows, rhs, solution, oldSolution, omega ); } else { const int sharedMemSize = ndlg * sizeof(int); jds_jacobi_kernel<ValueType, true, true> <<<dimGrid, dimBlock, sharedMemSize, stream>>>( jdsValues, jdsDlg, ndlg, jdsIlg, jdsJA, jdsPerm, numRows, rhs, solution, oldSolution, omega ); } } else { if ( !useSharedMem ) { jds_jacobi_kernel<ValueType, false, false> 
<<<dimGrid, dimBlock, 0, stream>>>( jdsValues, jdsDlg, ndlg, jdsIlg, jdsJA, jdsPerm, numRows, rhs, solution, oldSolution, omega ); } else { const int sharedMemSize = ndlg * sizeof(int); jds_jacobi_kernel<ValueType, false, true> <<<dimGrid, dimBlock, sharedMemSize, stream>>>( jdsValues, jdsDlg, ndlg, jdsIlg, jdsJA, jdsPerm, numRows, rhs, solution, oldSolution, omega); } } LAMA_CUDA_RT_CALL( cudaGetLastError(), "LAMA_STATUS_SJDSJACOBI_CUDAKERNEL_FAILED" ); if ( useTexture ) { if ( sizeof(ValueType) == sizeof(double) ) { LAMA_CUDA_RT_CALL( cudaUnbindTexture( texJDSDXref ), "LAMA_STATUS_CUDA_UNBINDTEX_FAILED" ); } else if ( sizeof(ValueType) == sizeof(float) ) { LAMA_CUDA_RT_CALL( cudaUnbindTexture( texJDSSXref ), "LAMA_STATUS_CUDA_UNBINDTEX_FAILED" ); } if ( !useSharedMem ) { LAMA_CUDA_RT_CALL( cudaUnbindTexture( texJDSdlgRef ), "LAMA_STATUS_CUDA_UNBINDTEX_FAILED" ); } } if ( !syncToken ) { cudaStreamSynchronize( stream ); } } /* --------------------------------------------------------------------------- */ /* Jacobi halo */ /* --------------------------------------------------------------------------- */ template<typename T,bool useTexture,bool useSharedMem> __global__ void jds_jacobi_halo_kernel( const T* const diagonal, const T* const jdsValuesHalo, const int* const jdsDlgHalo, const int ndlg_halo, const int* const jdsIlgHalo, const int* const jdsJAHalo, const int* const jdsPermHalo, T* const solutionLocal, const T* const oldSolutionHalo, const T omega ) { extern __shared__ int dlg[]; const int id = threadId( gridDim, blockIdx, blockDim, threadIdx ); if ( useSharedMem ) { int k = threadIdx.x; while ( k < ndlg_halo ) { dlg[k] = jdsDlgHalo[k]; k += blockDim.x; } __syncthreads(); } if ( id < fetch_JDSdlg<useTexture,useSharedMem>( jdsDlgHalo, dlg, 0 ) ) { T temp = 0.0; int pos = id; const int rowEnd = jdsIlgHalo[id]; const int perm = jdsPermHalo[id]; for ( int jj = 0; jj < rowEnd; ++jj ) { temp += jdsValuesHalo[pos] * fetch_JDSx<T,useTexture>( oldSolutionHalo, jdsJAHalo[pos] ); pos += fetch_JDSdlg<useTexture,useSharedMem>( jdsDlgHalo, dlg, jj ); } const T aDiag = diagonal[perm]; solutionLocal[perm] -= temp * omega / aDiag; } } template<typename ValueType> void CUDAJDSUtils::jacobiHalo( ValueType solutionLocal[], const IndexType numRows, const ValueType diagonal[], const IndexType ndlg_halo, const IndexType jdsPermHalo[], const IndexType jdsIlgHalo[], const IndexType jdsDlgHalo[], const IndexType jdsJAHalo[], const ValueType jdsValuesHalo[], const ValueType oldSolutionHalo[], const ValueType omega, SyncToken* UNUSED(syncToken) ) { LAMA_REGION( "CUDA.JDS.jacobiHalo" ) LAMA_LOG_INFO( logger, "jacobiHalo<" << typeid(ValueType).name() << ">" << ", #rows = " << numRows << ", omega = " << omega ) LAMA_CHECK_CUDA_ACCESS const int block_size = ( numRows > 8191 ? 256 : 128 ) / 2; dim3 dimBlock( block_size, 1, 1 ); dim3 dimGrid = makeGrid( numRows, dimBlock.x ); // TODO:numRows is too much... 
bool useTexture = CUDATexture::useTexture(); useTexture = false; // not yet tested const bool useSharedMem = false; // maybe optimize later LAMA_LOG_DEBUG( logger, "useTexture = " << useTexture << ", useSharedMem = " << useSharedMem ) if ( useTexture ) { if ( sizeof(ValueType) == sizeof(double) ) { LAMA_CUDA_RT_CALL( cudaBindTexture( NULL, texJDSDXref, oldSolutionHalo ), "LAMA_STATUS_CUDA_BINDTEX_FAILED" ) } else if ( sizeof(ValueType) == sizeof(float) ) { LAMA_CUDA_RT_CALL( cudaBindTexture( NULL, texJDSSXref, oldSolutionHalo ), "LAMA_STATUS_CUDA_BINDTEX_FAILED" ); } if ( !useSharedMem ) { LAMA_CUDA_RT_CALL( cudaBindTexture( NULL, texJDSdlgRef, jdsDlgHalo ), "LAMA_STATUS_CUDA_BINDTEX_FAILED" ); LAMA_CUDA_RT_CALL( cudaFuncSetCacheConfig( jds_jacobi_halo_kernel<ValueType, true, false>, cudaFuncCachePreferL1), "LAMA_STATUS_CUDA_FUNCSETCACHECONFIG_FAILED" ); } else { LAMA_CUDA_RT_CALL( cudaFuncSetCacheConfig( jds_jacobi_halo_kernel<ValueType, true, true>,cudaFuncCachePreferL1 ), "LAMA_STATUS_CUDA_FUNCSETCACHECONFIG_FAILED" ); } } else { if ( !useSharedMem ) { LAMA_CUDA_RT_CALL( cudaFuncSetCacheConfig( jds_jacobi_halo_kernel<ValueType, false, false>, cudaFuncCachePreferL1), "LAMA_STATUS_CUDA_FUNCSETCACHECONFIG_FAILED" ); } else { LAMA_CUDA_RT_CALL( cudaFuncSetCacheConfig( jds_jacobi_halo_kernel<ValueType, false, true>,cudaFuncCachePreferL1 ), "LAMA_STATUS_CUDA_FUNCSETCACHECONFIG_FAILED" ); } } if ( useTexture ) { if ( !useSharedMem ) { jds_jacobi_halo_kernel<ValueType, true, false> <<<dimGrid,dimBlock,0>>>( diagonal, jdsValuesHalo, jdsDlgHalo, ndlg_halo, jdsIlgHalo, jdsJAHalo, jdsPermHalo, solutionLocal, oldSolutionHalo, omega); } else { const int sharedMemSize = ndlg_halo * sizeof(int); jds_jacobi_halo_kernel<ValueType, true, true> <<<dimGrid,dimBlock,sharedMemSize>>>( diagonal, jdsValuesHalo, jdsDlgHalo, ndlg_halo, jdsIlgHalo, jdsJAHalo, jdsPermHalo, solutionLocal, oldSolutionHalo, omega); } } else { if ( !useSharedMem ) { jds_jacobi_halo_kernel<ValueType, false, false> <<<dimGrid,dimBlock>>>( diagonal, jdsValuesHalo, jdsDlgHalo, ndlg_halo, jdsIlgHalo, jdsJAHalo, jdsPermHalo, solutionLocal, oldSolutionHalo, omega); } else { const int sharedMemSize = ndlg_halo * sizeof(int); jds_jacobi_halo_kernel<ValueType, false, true> <<<dimGrid,dimBlock,sharedMemSize>>>( diagonal, jdsValuesHalo, jdsDlgHalo, ndlg_halo, jdsIlgHalo, jdsJAHalo, jdsPermHalo, solutionLocal, oldSolutionHalo, omega); } } LAMA_CUDA_RT_CALL( cudaGetLastError(), "LAMA_STATUS_CSRJACOBIHALO_CUDAKERNEL_FAILED" ); LAMA_CUDA_RT_CALL( cudaStreamSynchronize(0), "LAMA_STATUS_CSRJACOBIHALO_CUDAKERNEL_FAILED" ); if ( useTexture ) { if ( sizeof(ValueType) == sizeof(double) ) { LAMA_CUDA_RT_CALL( cudaUnbindTexture( texJDSDXref ), "LAMA_STATUS_CUDA_UNBINDTEX_FAILED" ); } else if ( sizeof(ValueType) == sizeof(float) ) { LAMA_CUDA_RT_CALL( cudaUnbindTexture( texJDSSXref ), "LAMA_STATUS_CUDA_UNBINDTEX_FAILED" ); } if ( !useSharedMem ) { LAMA_CUDA_RT_CALL( cudaUnbindTexture( texJDSdlgRef ), "LAMA_STATUS_CUDA_UNBINDTEX_FAILED" ); } } } /* --------------------------------------------------------------------------- */ template<typename ValueType,bool useTexture,bool useSharedMem> __global__ void jdsgemvKernel( IndexType n, const ValueType alpha, const ValueType* const jdsValues, const IndexType* const jdsDlg, const IndexType ndlg, const IndexType* const jdsIlg, const IndexType* jdsJA, const IndexType* jdsPerm, const ValueType* x_d, const ValueType beta, const ValueType* y_d, ValueType* const result_d ) { extern __shared__ IndexType dlg[]; const 
IndexType i = threadId( gridDim, blockIdx, blockDim, threadIdx ); if ( useSharedMem ) { int k = threadIdx.x; while ( k < ndlg ) { dlg[k] = jdsDlg[k]; k += blockDim.x; } __syncthreads(); } if ( i < n ) { IndexType perm = jdsPerm[i]; ValueType summand = 0.0; if ( beta != 0.0 ) { summand = beta * y_d[perm]; } ValueType value = 0.0; int k = i; for ( int jj = 0; jj < jdsIlg[i]; ++jj ) { IndexType j = jdsJA[k]; value += jdsValues[k] * fetch_JDSx<ValueType,useTexture>( x_d, j ); k += fetch_JDSdlg<useTexture,useSharedMem>( jdsDlg, dlg, jj ); } // for ( int jj = 0; jj < ndlg; ++jj ) // { // const int incr = fetch_JDSdlg<useTexture,useSharedMem>( jdsDlg, dlg, jj ); // if ( i < incr ) // { // IndexType j = jdsJA[k]; // value += jdsValues[k] * fetch_JDSx<ValueType,useTexture>( x_d, j ); // k += incr; // } // else // { // break; // } // } result_d[perm] = alpha * value + summand; } } /* --------------------------------------------------------------------------- */ template<typename ValueType> void CUDAJDSUtils::normalGEMV( ValueType result[], const ValueType alpha, const ValueType x[], const ValueType beta, const ValueType y[], const IndexType numRows, const IndexType jdsPerm[], const IndexType jdsILG[], const IndexType ndlg, const IndexType jdsDLG[], const IndexType jdsJA[], const ValueType jdsValues[], SyncToken* /* syncToken */) { LAMA_REGION( "CUDA.JDS.normalGEMV" ) LAMA_LOG_INFO( logger, "normalGEMV<" << typeid(ValueType).name() << ">" << ", #rows = " << numRows ) LAMA_LOG_INFO( logger, "alpha = " << alpha << ", x = " << x << ", beta = " << beta << ", y = " << y << ", result = " << result ) const bool useTexture = false; // still problems: CUDATexture::useTexture(); const bool useSharedMem = false; // maybe optimize later LAMA_LOG_DEBUG( logger, "useTexture = " << useTexture << ", useSharedMem = " << useSharedMem ) const int block_size = ( numRows > 8191 ? 
256 : 128 ); dim3 dimBlock( block_size, 1, 1 ); dim3 dimGrid = makeGrid( numRows, dimBlock.x ); LAMA_CHECK_CUDA_ACCESS if ( useTexture ) { if ( sizeof(ValueType) == sizeof(double) ) { LAMA_CUDA_RT_CALL( cudaBindTexture( NULL, texJDSDXref, x ), "LAMA_STATUS_CUDA_BINDTEX_FAILED" ); } else if ( sizeof(ValueType) == sizeof(float) ) { LAMA_CUDA_RT_CALL( cudaBindTexture( NULL, texJDSSXref, x ), "LAMA_STATUS_CUDA_BINDTEX_FAILED" ); } if ( useSharedMem ) { const int sharedMemSize = ndlg * sizeof(int); cudaFuncSetCacheConfig( jdsgemvKernel<ValueType,true,true>, cudaFuncCachePreferL1 ); jdsgemvKernel<ValueType, true, true><<<dimGrid,dimBlock,sharedMemSize>>> ( numRows, alpha, jdsValues, jdsDLG, ndlg, jdsILG, jdsJA, jdsPerm, x, beta, y, result); } else // no sharedMem { LAMA_CUDA_RT_CALL( cudaBindTexture( NULL, texJDSdlgRef, jdsDLG ), "LAMA_STATUS_CUDA_BINDTEX_FAILED" ); cudaFuncSetCacheConfig( jdsgemvKernel<ValueType,true,false>, cudaFuncCachePreferL1 ); jdsgemvKernel<ValueType, true, false><<<dimGrid,dimBlock>>> ( numRows, alpha, jdsValues, jdsDLG, ndlg, jdsILG, jdsJA, jdsPerm, x, beta, y, result); } // skip the following in case of asynchronous execution LAMA_CUDA_RT_CALL( cudaStreamSynchronize(0), "JDS: gemvKernel FAILED" ) if ( useSharedMem ) { LAMA_CUDA_RT_CALL( cudaUnbindTexture( texJDSdlgRef ), "LAMA_STATUS_CUDA_UNBINDTEX_FAILED" ); } if ( sizeof(ValueType) == sizeof(double) ) { LAMA_CUDA_RT_CALL( cudaUnbindTexture( texJDSDXref ), "LAMA_STATUS_CUDA_UNBINDTEX_FAILED" ); } else if ( sizeof(ValueType) == sizeof(float) ) { LAMA_CUDA_RT_CALL( cudaUnbindTexture( texJDSSXref ), "LAMA_STATUS_CUDA_UNBINDTEX_FAILED" ); } } else // no Texture cache { if ( useSharedMem ) { const int sharedMemSize = ndlg * sizeof(int); cudaFuncSetCacheConfig( jdsgemvKernel<ValueType,false,true>, cudaFuncCachePreferL1 ); jdsgemvKernel<ValueType, false, true><<<dimGrid,dimBlock,sharedMemSize>>> ( numRows, alpha, jdsValues, jdsDLG, ndlg, jdsILG, jdsJA, jdsPerm, x, beta, y, result); } else // no sharedMem { cudaFuncSetCacheConfig( jdsgemvKernel<ValueType,false,false>, cudaFuncCachePreferL1 ); jdsgemvKernel<ValueType, false, false><<<dimGrid,dimBlock>>> ( numRows, alpha, jdsValues, jdsDLG, ndlg, jdsILG, jdsJA, jdsPerm, x, beta, y, result); } } LAMA_CHECK_CUDA_ERROR cudaStreamSynchronize( 0 ); } /* --------------------------------------------------------------------------- */ void CUDAJDSUtils::setInterface( JDSUtilsInterface& JDSUtils ) { LAMA_LOG_INFO( logger, "set JDS routines for CUDA in Interface" ) LAMA_INTERFACE_REGISTER( JDSUtils, sortRows ) LAMA_INTERFACE_REGISTER( JDSUtils, checkDiagonalProperty ) LAMA_INTERFACE_REGISTER( JDSUtils, check ) LAMA_INTERFACE_REGISTER( JDSUtils, ilg2dlg ) LAMA_INTERFACE_REGISTER( JDSUtils, setInversePerm ) LAMA_INTERFACE_REGISTER_T( JDSUtils, setDiagonalWithScalar, float ) LAMA_INTERFACE_REGISTER_T( JDSUtils, setDiagonalWithScalar, double ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, scaleValue, float, float ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, scaleValue, float, double ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, scaleValue, double, float ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, scaleValue, double, double ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, getRow, float, float ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, getRow, float, double ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, getRow, double, float ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, getRow, double, double ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, getValue, float, float ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, getValue, float, double ) 
LAMA_INTERFACE_REGISTER_TT( JDSUtils, getValue, double, float ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, getValue, double, double ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, getCSRValues, float, float ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, getCSRValues, float, double ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, getCSRValues, double, float ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, getCSRValues, double, double ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, setCSRValues, float, float ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, setCSRValues, float, double ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, setCSRValues, double, float ) LAMA_INTERFACE_REGISTER_TT( JDSUtils, setCSRValues, double, double ) LAMA_INTERFACE_REGISTER_T( JDSUtils, jacobi, float ) LAMA_INTERFACE_REGISTER_T( JDSUtils, jacobi, double ) LAMA_INTERFACE_REGISTER_T( JDSUtils, normalGEMV, float ) LAMA_INTERFACE_REGISTER_T( JDSUtils, normalGEMV, double ) LAMA_INTERFACE_REGISTER_T( JDSUtils, jacobiHalo, float ) LAMA_INTERFACE_REGISTER_T( JDSUtils, jacobiHalo, double ) } /* --------------------------------------------------------------------------- */ /* Static registration of the Utils routines */ /* --------------------------------------------------------------------------- */ bool CUDAJDSUtils::registerInterface() { LAMAInterface& interface = LAMAInterfaceRegistry::getRegistry().modifyInterface( Context::CUDA ); setInterface( interface.JDSUtils ); return true; } /* --------------------------------------------------------------------------- */ /* Static initialization at program start */ /* --------------------------------------------------------------------------- */ bool CUDAJDSUtils::initialized = registerInterface(); } // namespace lama
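The jacobi, jacobiHalo and normalGEMV kernels in the two CUDAJDSUtils files above walk the jagged diagonals of the JDS format in the same way: a row starts at its slot in the first diagonal and hops forward by dlg[jj] to reach its entry in the next one (the jacobi kernels only split off the diagonal element first). A minimal host-side sketch of that access pattern, assuming plain C++ arrays with the same roles as the kernel parameters; it is an illustration, not part of the LAMA sources:

// Illustrative sketch only: dot product of one JDS row with a dense vector x.
// i      : position of the (permuted) row
// ilg    : number of stored entries per row
// dlg    : length of each jagged diagonal
// ja     : column indices, values : matrix entries
double jdsRowTimesX( int i, const int* ilg, const int* dlg,
                     const int* ja, const double* values, const double* x )
{
    double sum = 0.0;
    int pos = i;                         // entry of this row in diagonal 0
    for ( int jj = 0; jj < ilg[i]; ++jj )
    {
        sum += values[pos] * x[ ja[pos] ];
        pos += dlg[jj];                  // jump to the same row in the next diagonal
    }
    return sum;
}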
bb9218f10f12e846c996ae211b21da857b46c1ad.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <time.h> #define lim 99 #define threads 10 void print(int *w){ for(int i=0; i<lim; i++){ printf("%d\n", w[i]); } } void fillVector(int *w){ for(int i=0; i<lim; i++){ w[i]=i; } } __global__ void add(int *d_x, int *d_y, int *d_z){ int i = blockIdx.x * blockDim.x + threadIdx.x; if (i<lim){ d_z[i] = d_x[i] + d_y[i]; } } int main(int argc, char const *argv[]) { int *x = (int*)malloc(lim*sizeof(int)); int *y = (int*)malloc(lim*sizeof(int)); int *z = (int*)malloc(lim*sizeof(int)); fillVector(x); fillVector(y); int *d_x, *d_y, *d_z; clock_t begin, end; double time_spent; begin = clock(); hipMalloc((void**)&d_x, lim*sizeof(int)); hipMalloc((void**)&d_y, lim*sizeof(int)); hipMalloc((void**)&d_z, lim*sizeof(int)); hipMemcpy(d_x, x, lim*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_y, y, lim*sizeof(int), hipMemcpyHostToDevice); int blocks = ceil((float)lim/threads); //printf("%d",blocks); hipLaunchKernelGGL(( add), dim3(blocks),dim3(threads), 0, 0, d_x, d_y, d_z); hipMemcpy(z, d_z, lim*sizeof(int), hipMemcpyDeviceToHost); //print(z); hipFree(d_x); hipFree(d_y); hipFree(d_z); end = clock(); time_spent = (double)(end - begin) / CLOCKS_PER_SEC; printf("%lf\n", time_spent); free(x); free(y); free(z); return 0; }
bb9218f10f12e846c996ae211b21da857b46c1ad.cu
#include <stdio.h> #include <cuda.h> #include <time.h> #define lim 99 #define threads 10 void print(int *w){ for(int i=0; i<lim; i++){ printf("%d\n", w[i]); } } void fillVector(int *w){ for(int i=0; i<lim; i++){ w[i]=i; } } __global__ void add(int *d_x, int *d_y, int *d_z){ int i = blockIdx.x * blockDim.x + threadIdx.x; if (i<lim){ d_z[i] = d_x[i] + d_y[i]; } } int main(int argc, char const *argv[]) { int *x = (int*)malloc(lim*sizeof(int)); int *y = (int*)malloc(lim*sizeof(int)); int *z = (int*)malloc(lim*sizeof(int)); fillVector(x); fillVector(y); int *d_x, *d_y, *d_z; clock_t begin, end; double time_spent; begin = clock(); cudaMalloc((void**)&d_x, lim*sizeof(int)); cudaMalloc((void**)&d_y, lim*sizeof(int)); cudaMalloc((void**)&d_z, lim*sizeof(int)); cudaMemcpy(d_x, x, lim*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_y, y, lim*sizeof(int), cudaMemcpyHostToDevice); int blocks = ceil((float)lim/threads); //printf("%d",blocks); add<<<blocks,threads>>>(d_x, d_y, d_z); cudaMemcpy(z, d_z, lim*sizeof(int), cudaMemcpyDeviceToHost); //print(z); cudaFree(d_x); cudaFree(d_y); cudaFree(d_z); end = clock(); time_spent = (double)(end - begin) / CLOCKS_PER_SEC; printf("%lf\n", time_spent); free(x); free(y); free(z); return 0; }
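The bb9218... pair above is the smallest example in this set of what hipify changes: cudaMalloc, cudaMemcpy and cudaFree become hipMalloc, hipMemcpy and hipFree, and the triple-chevron kernel launch becomes an explicit hipLaunchKernelGGL call. A sketch of just the launch line, using the same add kernel and variables as in the two files (hipify's actual output above wraps the kernel name in an extra pair of parentheses):

// CUDA form, as in the .cu file:
//   add<<<blocks, threads>>>(d_x, d_y, d_z);
// HIP form: grid and block sizes are wrapped in dim3, and the shared-memory
// size and stream arguments (both 0 here) are spelled out explicitly.
hipLaunchKernelGGL( add, dim3(blocks), dim3(threads), 0, 0, d_x, d_y, d_z );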
e9447cd14776334b2e8fad79394a8250a32fd5df.hip
// !!! This is a file automatically generated by hipify!!! /** * Most of the follow code falls under the following stipulation from NVIDIA. * * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <iostream> #include <random> #include <chrono> #include <stdio.h> #include <hip/hip_runtime.h> #include <helper_cuda.h> #include "nvgraph.h" #include <ImagesCPU.h> #include <ImagesNPP.h> #include <ImageIO.h> #include <Exceptions.h> #include <string.h> #include <fstream> #include <iostream> #include <hip/hip_runtime.h> #include <npp.h> #include <helper_string.h> #include <helper_cuda.h> // Checks the given nvGraphStatus for errors, then outputs // a log and exits the program if any errors are produced. void check_status(const nvgraphStatus_t & status) { if ((int)status != 0) { printf("ERROR : %d\n",status); exit(0); } } // Prints the NPP library information bool printfNPPinfo() { const NppLibraryVersion *libVer = nppGetLibVersion(); printf("NPP Library Version %d.%d.%d\n", libVer->major, libVer->minor, libVer->build); int driverVersion, runtimeVersion; hipDriverGetVersion(&driverVersion); hipRuntimeGetVersion(&runtimeVersion); printf(" CUDA Driver Version: %d.%d\n", driverVersion/1000, (driverVersion%100)/10); printf(" CUDA Runtime Version: %d.%d\n", runtimeVersion/1000, (runtimeVersion%100)/10); // Min spec is SM 1.0 devices bool bVal = checkCudaCapabilities(1, 0); return bVal; } // Initializes a CUDA device and returns the device ID for future use. int cudaDeviceInit(int argc, const char **argv) { int deviceCount; checkCudaErrors(hipGetDeviceCount(&deviceCount)); if (deviceCount == 0) { std::cerr << "CUDA error: no devices supporting CUDA." << std::endl; exit(EXIT_FAILURE); } int dev = findCudaDevice(argc, argv); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); std::cerr << "hipSetDevice GPU" << dev << " = " << deviceProp.name << std::endl; checkCudaErrors(hipSetDevice(dev)); return dev; } // Runs a box filter test using the given CUDA device parameters. int boxFilterNPPTest(int argc, char **argv) { try { std::string sFilename; char *filePath; cudaDeviceInit(argc, (const char **)argv); if (printfNPPinfo() == false) { exit(EXIT_SUCCESS); } if (checkCmdLineFlag(argc, (const char **)argv, "input")) { getCmdLineArgumentString(argc, (const char **)argv, "input", &filePath); } else { filePath = sdkFindFilePath("Lena.pgm", argv[0]); } if (filePath) { sFilename = filePath; } else { sFilename = "Lena.pgm"; } // if we specify the filename at the command line, then we only test sFilename[0]. int file_errors = 0; std::ifstream infile(sFilename.data(), std::ifstream::in); if (infile.good()) { std::cout << "boxFilterNPP opened: <" << sFilename.data() << "> successfully!" 
<< std::endl; file_errors = 0; infile.close(); } else { std::cout << "boxFilterNPP unable to open: <" << sFilename.data() << ">" << std::endl; file_errors++; infile.close(); } if (file_errors > 0) { exit(EXIT_FAILURE); } std::string sResultFilename = sFilename; std::string::size_type dot = sResultFilename.rfind('.'); if (dot != std::string::npos) { sResultFilename = sResultFilename.substr(0, dot); } sResultFilename += "_boxFilter.pgm"; if (checkCmdLineFlag(argc, (const char **)argv, "output")) { char *outputFilePath; getCmdLineArgumentString(argc, (const char **)argv, "output", &outputFilePath); sResultFilename = outputFilePath; } // declare a host image object for an 8-bit grayscale image npp::ImageCPU_8u_C1 oHostSrc; // load gray-scale image from disk npp::loadImage(sFilename, oHostSrc); // declare a device image and copy construct from the host image, // i.e. upload host to device npp::ImageNPP_8u_C1 oDeviceSrc(oHostSrc); int boxfilterSize = 5; if (checkCmdLineFlag(argc, (const char **)argv, "boxfilterSize")) { boxfilterSize = getCmdLineArgumentInt(argc, (const char **)argv, "boxfilterSize"); } // create struct with box-filter mask size NppiSize oMaskSize = {boxfilterSize, boxfilterSize}; NppiSize oSrcSize = {(int)oDeviceSrc.width(), (int)oDeviceSrc.height()}; NppiPoint oSrcOffset = {0, 0}; // create struct with ROI size NppiSize oSizeROI = {(int)oDeviceSrc.width() , (int)oDeviceSrc.height() }; // allocate device image of appropriately reduced size npp::ImageNPP_8u_C1 oDeviceDst(oSizeROI.width, oSizeROI.height); // set anchor point inside the mask to (oMaskSize.width / 2, oMaskSize.height / 2) // It should round down when odd NppiPoint oAnchor = {oMaskSize.width / 2, oMaskSize.height / 2}; // run box filter nppiFilterBoxBorder_8u_C1R(oDeviceSrc.data(), oDeviceSrc.pitch(), oSrcSize, oSrcOffset, oDeviceDst.data(), oDeviceDst.pitch(), oSizeROI, oMaskSize, oAnchor, NPP_BORDER_REPLICATE); // declare a host image for the result npp::ImageCPU_8u_C1 oHostDst(oDeviceDst.size()); // and copy the device result data into it oDeviceDst.copyTo(oHostDst.data(), oHostDst.pitch()); saveImage(sResultFilename, oHostDst); std::cout << "Saved image: " << sResultFilename << std::endl; nppiFree(oDeviceSrc.data()); nppiFree(oDeviceDst.data()); } catch (npp::Exception &rException) { std::cerr << "Program error! The following exception occurred: \n"; std::cerr << rException << std::endl; std::cerr << "Aborting." << std::endl; exit(EXIT_FAILURE); } catch (...) { std::cerr << "Program error! An unknow type of exception occurred. \n"; std::cerr << "Aborting." << std::endl; exit(EXIT_FAILURE); return -1; } return 0; } // Execute the NVIDIA example page rank test. 
This algorithm computes weights of various int pageRankTest(int argc, char **argv) { size_t n = 6, nnz = 10, vertex_numsets = 3, edge_numsets = 1; const float alpha1 = 0.85, alpha2 = 0.90; const void *alpha1_p = (const void *) &alpha1, *alpha2_p = (const void *) &alpha2; int i, *destination_offsets_h, *source_indices_h; float *weights_h, *bookmark_h, *pr_1,*pr_2; void** vertex_dim; if (checkCmdLineFlag(argc, (const char **)argv, "pageRankSize")) { n = getCmdLineArgumentInt(argc, (const char **)argv, "pageRankSize"); } if (checkCmdLineFlag(argc, (const char **)argv, "pageRankWeightSize")) { nnz = getCmdLineArgumentInt(argc, (const char **)argv, "pageRankWeightSize"); } // nvgraph variables nvgraphStatus_t status; nvgraphHandle_t handle; nvgraphGraphDescr_t graph; nvgraphCSCTopology32I_t CSC_input; hipDataType edge_dimT = HIP_R_32F; hipDataType* vertex_dimT; // use command-line specified CUDA device, otherwise use device with highest Gflops/s int cuda_device = 0; cuda_device = findCudaDevice(argc, (const char **)argv); hipDeviceProp_t deviceProp; checkCudaErrors(hipGetDevice(&cuda_device)); checkCudaErrors(hipGetDeviceProperties(&deviceProp, cuda_device)); printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount); if (deviceProp.major < 3) { printf("> nvGraph requires device SM 3.0+\n"); printf("> Waiving.\n"); exit(EXIT_WAIVED); } // Allocate host data destination_offsets_h = (int*) malloc((n+1)*sizeof(int)); source_indices_h = (int*) malloc(nnz*sizeof(int)); weights_h = (float*)malloc(nnz*sizeof(float)); bookmark_h = (float*)malloc(n*sizeof(float)); pr_1 = (float*)malloc(n*sizeof(float)); pr_2 = (float*)malloc(n*sizeof(float)); vertex_dim = (void**)malloc(vertex_numsets*sizeof(void*)); vertex_dimT = (hipDataType*)malloc(vertex_numsets*sizeof(hipDataType)); CSC_input = (nvgraphCSCTopology32I_t) malloc(sizeof(struct nvgraphCSCTopology32I_st)); // Initialize host data vertex_dim[0] = (void*)bookmark_h; vertex_dim[1]= (void*)pr_1, vertex_dim[2]= (void*)pr_2; vertex_dimT[0] = HIP_R_32F; vertex_dimT[1]= HIP_R_32F, vertex_dimT[2]= HIP_R_32F; // Create a random generate that will generate random numbers from 0 to 1.0. 
// Use a set seed so output is deterministic unsigned seed = 12345; std::default_random_engine gen(seed); std::uniform_real_distribution<float> floatDist(0.0,1.0); std::uniform_int_distribution<int> intDist(0,nnz); for (i = 0; i < nnz; ++i) { weights_h[i] = floatDist(gen); } for (i = 0; i < n; ++i) { destination_offsets_h[i] = i; } destination_offsets_h[n] = nnz; for (i = 0; i < nnz; ++i) { source_indices_h[i] = intDist(gen); } for (int i = 0; i < n; ++i) { bookmark_h[i] = floatDist(gen); } // Starting nvgraph check_status(nvgraphCreate (&handle)); check_status(nvgraphCreateGraphDescr (handle, &graph)); CSC_input->nvertices = n; CSC_input->nedges = nnz; CSC_input->destination_offsets = destination_offsets_h; CSC_input->source_indices = source_indices_h; // Set graph connectivity and properties (tranfers) check_status(nvgraphSetGraphStructure(handle, graph, (void*)CSC_input, NVGRAPH_CSC_32)); check_status(nvgraphAllocateVertexData(handle, graph, vertex_numsets, vertex_dimT)); check_status(nvgraphAllocateEdgeData (handle, graph, edge_numsets, &edge_dimT)); for (i = 0; i < 2; ++i) check_status(nvgraphSetVertexData(handle, graph, vertex_dim[i], i)); check_status(nvgraphSetEdgeData(handle, graph, (void*)weights_h, 0)); // First run with default values check_status(nvgraphPagerank(handle, graph, 0, alpha1_p, 0, 0, 1, 0.5f, 100)); // Get and print result check_status(nvgraphGetVertexData(handle, graph, vertex_dim[1], 1)); printf("pr_1, alpha = 0.85\n"); for (i = 0; i<n; i++) printf("%f\n",pr_1[i]); printf("\n"); // Second run with different damping factor and an initial guess for (i = 0; i<n; i++) pr_2[i] =pr_1[i]; nvgraphSetVertexData(handle, graph, vertex_dim[2], 2); check_status(nvgraphPagerank(handle, graph, 0, alpha2_p, 0, 1, 2, 0.5f, 100)); // Get and print result check_status(nvgraphGetVertexData(handle, graph, vertex_dim[2], 2)); printf("pr_2, alpha = 0.90\n"); for (i = 0; i<n; i++) printf("%f\n",pr_2[i]); printf("\n"); //Clean check_status(nvgraphDestroyGraphDescr(handle, graph)); check_status(nvgraphDestroy(handle)); free(destination_offsets_h); free(source_indices_h); free(weights_h); free(bookmark_h); free(pr_1); free(pr_2); free(vertex_dim); free(vertex_dimT); free(CSC_input); printf("\nDone!\n"); return EXIT_SUCCESS; } // Executes NVIDIA examples of using the NPP and nvgraph libraries // These use the pagerank algorithm as well as the box filter algorithm. int main(int argc, char** argv) { auto startTime = std::chrono::system_clock::now(); pageRankTest(argc, argv); auto endTime = std::chrono::system_clock::now(); std::chrono::duration<double> totalTime = endTime-startTime; std::cout << "PageRank execution took: " << totalTime.count() << " seconds." << std::endl; startTime = std::chrono::system_clock::now(); boxFilterNPPTest(argc, argv); endTime = std::chrono::system_clock::now(); totalTime = endTime-startTime; std::cout << "boxFilter execution took: " << totalTime.count() << " seconds." << std::endl; return 0; }
e9447cd14776334b2e8fad79394a8250a32fd5df.cu
/** * Most of the follow code falls under the following stipulation from NVIDIA. * * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <iostream> #include <random> #include <chrono> #include <stdio.h> #include <cuda_runtime.h> #include <helper_cuda.h> #include "nvgraph.h" #include <ImagesCPU.h> #include <ImagesNPP.h> #include <ImageIO.h> #include <Exceptions.h> #include <string.h> #include <fstream> #include <iostream> #include <cuda_runtime.h> #include <npp.h> #include <helper_string.h> #include <helper_cuda.h> // Checks the given nvGraphStatus for errors, then outputs // a log and exits the program if any errors are produced. void check_status(const nvgraphStatus_t & status) { if ((int)status != 0) { printf("ERROR : %d\n",status); exit(0); } } // Prints the NPP library information bool printfNPPinfo() { const NppLibraryVersion *libVer = nppGetLibVersion(); printf("NPP Library Version %d.%d.%d\n", libVer->major, libVer->minor, libVer->build); int driverVersion, runtimeVersion; cudaDriverGetVersion(&driverVersion); cudaRuntimeGetVersion(&runtimeVersion); printf(" CUDA Driver Version: %d.%d\n", driverVersion/1000, (driverVersion%100)/10); printf(" CUDA Runtime Version: %d.%d\n", runtimeVersion/1000, (runtimeVersion%100)/10); // Min spec is SM 1.0 devices bool bVal = checkCudaCapabilities(1, 0); return bVal; } // Initializes a CUDA device and returns the device ID for future use. int cudaDeviceInit(int argc, const char **argv) { int deviceCount; checkCudaErrors(cudaGetDeviceCount(&deviceCount)); if (deviceCount == 0) { std::cerr << "CUDA error: no devices supporting CUDA." << std::endl; exit(EXIT_FAILURE); } int dev = findCudaDevice(argc, argv); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); std::cerr << "cudaSetDevice GPU" << dev << " = " << deviceProp.name << std::endl; checkCudaErrors(cudaSetDevice(dev)); return dev; } // Runs a box filter test using the given CUDA device parameters. int boxFilterNPPTest(int argc, char **argv) { try { std::string sFilename; char *filePath; cudaDeviceInit(argc, (const char **)argv); if (printfNPPinfo() == false) { exit(EXIT_SUCCESS); } if (checkCmdLineFlag(argc, (const char **)argv, "input")) { getCmdLineArgumentString(argc, (const char **)argv, "input", &filePath); } else { filePath = sdkFindFilePath("Lena.pgm", argv[0]); } if (filePath) { sFilename = filePath; } else { sFilename = "Lena.pgm"; } // if we specify the filename at the command line, then we only test sFilename[0]. int file_errors = 0; std::ifstream infile(sFilename.data(), std::ifstream::in); if (infile.good()) { std::cout << "boxFilterNPP opened: <" << sFilename.data() << "> successfully!" 
<< std::endl; file_errors = 0; infile.close(); } else { std::cout << "boxFilterNPP unable to open: <" << sFilename.data() << ">" << std::endl; file_errors++; infile.close(); } if (file_errors > 0) { exit(EXIT_FAILURE); } std::string sResultFilename = sFilename; std::string::size_type dot = sResultFilename.rfind('.'); if (dot != std::string::npos) { sResultFilename = sResultFilename.substr(0, dot); } sResultFilename += "_boxFilter.pgm"; if (checkCmdLineFlag(argc, (const char **)argv, "output")) { char *outputFilePath; getCmdLineArgumentString(argc, (const char **)argv, "output", &outputFilePath); sResultFilename = outputFilePath; } // declare a host image object for an 8-bit grayscale image npp::ImageCPU_8u_C1 oHostSrc; // load gray-scale image from disk npp::loadImage(sFilename, oHostSrc); // declare a device image and copy construct from the host image, // i.e. upload host to device npp::ImageNPP_8u_C1 oDeviceSrc(oHostSrc); int boxfilterSize = 5; if (checkCmdLineFlag(argc, (const char **)argv, "boxfilterSize")) { boxfilterSize = getCmdLineArgumentInt(argc, (const char **)argv, "boxfilterSize"); } // create struct with box-filter mask size NppiSize oMaskSize = {boxfilterSize, boxfilterSize}; NppiSize oSrcSize = {(int)oDeviceSrc.width(), (int)oDeviceSrc.height()}; NppiPoint oSrcOffset = {0, 0}; // create struct with ROI size NppiSize oSizeROI = {(int)oDeviceSrc.width() , (int)oDeviceSrc.height() }; // allocate device image of appropriately reduced size npp::ImageNPP_8u_C1 oDeviceDst(oSizeROI.width, oSizeROI.height); // set anchor point inside the mask to (oMaskSize.width / 2, oMaskSize.height / 2) // It should round down when odd NppiPoint oAnchor = {oMaskSize.width / 2, oMaskSize.height / 2}; // run box filter nppiFilterBoxBorder_8u_C1R(oDeviceSrc.data(), oDeviceSrc.pitch(), oSrcSize, oSrcOffset, oDeviceDst.data(), oDeviceDst.pitch(), oSizeROI, oMaskSize, oAnchor, NPP_BORDER_REPLICATE); // declare a host image for the result npp::ImageCPU_8u_C1 oHostDst(oDeviceDst.size()); // and copy the device result data into it oDeviceDst.copyTo(oHostDst.data(), oHostDst.pitch()); saveImage(sResultFilename, oHostDst); std::cout << "Saved image: " << sResultFilename << std::endl; nppiFree(oDeviceSrc.data()); nppiFree(oDeviceDst.data()); } catch (npp::Exception &rException) { std::cerr << "Program error! The following exception occurred: \n"; std::cerr << rException << std::endl; std::cerr << "Aborting." << std::endl; exit(EXIT_FAILURE); } catch (...) { std::cerr << "Program error! An unknow type of exception occurred. \n"; std::cerr << "Aborting." << std::endl; exit(EXIT_FAILURE); return -1; } return 0; } // Execute the NVIDIA example page rank test. 
This algorithm computes weights of various int pageRankTest(int argc, char **argv) { size_t n = 6, nnz = 10, vertex_numsets = 3, edge_numsets = 1; const float alpha1 = 0.85, alpha2 = 0.90; const void *alpha1_p = (const void *) &alpha1, *alpha2_p = (const void *) &alpha2; int i, *destination_offsets_h, *source_indices_h; float *weights_h, *bookmark_h, *pr_1,*pr_2; void** vertex_dim; if (checkCmdLineFlag(argc, (const char **)argv, "pageRankSize")) { n = getCmdLineArgumentInt(argc, (const char **)argv, "pageRankSize"); } if (checkCmdLineFlag(argc, (const char **)argv, "pageRankWeightSize")) { nnz = getCmdLineArgumentInt(argc, (const char **)argv, "pageRankWeightSize"); } // nvgraph variables nvgraphStatus_t status; nvgraphHandle_t handle; nvgraphGraphDescr_t graph; nvgraphCSCTopology32I_t CSC_input; cudaDataType_t edge_dimT = CUDA_R_32F; cudaDataType_t* vertex_dimT; // use command-line specified CUDA device, otherwise use device with highest Gflops/s int cuda_device = 0; cuda_device = findCudaDevice(argc, (const char **)argv); cudaDeviceProp deviceProp; checkCudaErrors(cudaGetDevice(&cuda_device)); checkCudaErrors(cudaGetDeviceProperties(&deviceProp, cuda_device)); printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount); if (deviceProp.major < 3) { printf("> nvGraph requires device SM 3.0+\n"); printf("> Waiving.\n"); exit(EXIT_WAIVED); } // Allocate host data destination_offsets_h = (int*) malloc((n+1)*sizeof(int)); source_indices_h = (int*) malloc(nnz*sizeof(int)); weights_h = (float*)malloc(nnz*sizeof(float)); bookmark_h = (float*)malloc(n*sizeof(float)); pr_1 = (float*)malloc(n*sizeof(float)); pr_2 = (float*)malloc(n*sizeof(float)); vertex_dim = (void**)malloc(vertex_numsets*sizeof(void*)); vertex_dimT = (cudaDataType_t*)malloc(vertex_numsets*sizeof(cudaDataType_t)); CSC_input = (nvgraphCSCTopology32I_t) malloc(sizeof(struct nvgraphCSCTopology32I_st)); // Initialize host data vertex_dim[0] = (void*)bookmark_h; vertex_dim[1]= (void*)pr_1, vertex_dim[2]= (void*)pr_2; vertex_dimT[0] = CUDA_R_32F; vertex_dimT[1]= CUDA_R_32F, vertex_dimT[2]= CUDA_R_32F; // Create a random generate that will generate random numbers from 0 to 1.0. 
// Use a set seed so output is deterministic unsigned seed = 12345; std::default_random_engine gen(seed); std::uniform_real_distribution<float> floatDist(0.0,1.0); std::uniform_int_distribution<int> intDist(0,nnz); for (i = 0; i < nnz; ++i) { weights_h[i] = floatDist(gen); } for (i = 0; i < n; ++i) { destination_offsets_h[i] = i; } destination_offsets_h[n] = nnz; for (i = 0; i < nnz; ++i) { source_indices_h[i] = intDist(gen); } for (int i = 0; i < n; ++i) { bookmark_h[i] = floatDist(gen); } // Starting nvgraph check_status(nvgraphCreate (&handle)); check_status(nvgraphCreateGraphDescr (handle, &graph)); CSC_input->nvertices = n; CSC_input->nedges = nnz; CSC_input->destination_offsets = destination_offsets_h; CSC_input->source_indices = source_indices_h; // Set graph connectivity and properties (tranfers) check_status(nvgraphSetGraphStructure(handle, graph, (void*)CSC_input, NVGRAPH_CSC_32)); check_status(nvgraphAllocateVertexData(handle, graph, vertex_numsets, vertex_dimT)); check_status(nvgraphAllocateEdgeData (handle, graph, edge_numsets, &edge_dimT)); for (i = 0; i < 2; ++i) check_status(nvgraphSetVertexData(handle, graph, vertex_dim[i], i)); check_status(nvgraphSetEdgeData(handle, graph, (void*)weights_h, 0)); // First run with default values check_status(nvgraphPagerank(handle, graph, 0, alpha1_p, 0, 0, 1, 0.5f, 100)); // Get and print result check_status(nvgraphGetVertexData(handle, graph, vertex_dim[1], 1)); printf("pr_1, alpha = 0.85\n"); for (i = 0; i<n; i++) printf("%f\n",pr_1[i]); printf("\n"); // Second run with different damping factor and an initial guess for (i = 0; i<n; i++) pr_2[i] =pr_1[i]; nvgraphSetVertexData(handle, graph, vertex_dim[2], 2); check_status(nvgraphPagerank(handle, graph, 0, alpha2_p, 0, 1, 2, 0.5f, 100)); // Get and print result check_status(nvgraphGetVertexData(handle, graph, vertex_dim[2], 2)); printf("pr_2, alpha = 0.90\n"); for (i = 0; i<n; i++) printf("%f\n",pr_2[i]); printf("\n"); //Clean check_status(nvgraphDestroyGraphDescr(handle, graph)); check_status(nvgraphDestroy(handle)); free(destination_offsets_h); free(source_indices_h); free(weights_h); free(bookmark_h); free(pr_1); free(pr_2); free(vertex_dim); free(vertex_dimT); free(CSC_input); printf("\nDone!\n"); return EXIT_SUCCESS; } // Executes NVIDIA examples of using the NPP and nvgraph libraries // These use the pagerank algorithm as well as the box filter algorithm. int main(int argc, char** argv) { auto startTime = std::chrono::system_clock::now(); pageRankTest(argc, argv); auto endTime = std::chrono::system_clock::now(); std::chrono::duration<double> totalTime = endTime-startTime; std::cout << "PageRank execution took: " << totalTime.count() << " seconds." << std::endl; startTime = std::chrono::system_clock::now(); boxFilterNPPTest(argc, argv); endTime = std::chrono::system_clock::now(); totalTime = endTime-startTime; std::cout << "boxFilter execution took: " << totalTime.count() << " seconds." << std::endl; return 0; }
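// ----------------------------------------------------------------------------
// The pair above illustrates the straightforward runtime-API renaming that
// hipify performs (cudaGetDeviceProperties -> hipGetDeviceProperties,
// cudaMemcpy -> hipMemcpy, cudaDeviceProp -> hipDeviceProp_t, CUDA_R_32F ->
// HIP_R_32F, and so on). A minimal sketch of that pattern in isolation,
// assuming a HIP toolchain; hipQueryExample is a hypothetical helper used
// only for illustration and is not part of either file above:
#include <hip/hip_runtime.h>
#include <cstdio>
static int hipQueryExample() {
    int dev = 0;
    hipDeviceProp_t prop;                        // CUDA: cudaDeviceProp
    if (hipGetDeviceProperties(&prop, dev)       // CUDA: cudaGetDeviceProperties
            != hipSuccess) {                     // CUDA: cudaSuccess
        return -1;
    }
    std::printf("device %d: %s (SM %d.%d)\n", dev, prop.name, prop.major, prop.minor);
    return 0;
}
// ----------------------------------------------------------------------------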
74fe3c70e041e23095b4c82bfb379a63cd244a64.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2014 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Author: Arman Pazouki, Milad Rakhsha, Wei Hu // ============================================================================= // // Base class for processing boundary condition enforcing (bce) markers forces // in FSI system. // ============================================================================= #include "chrono_fsi/physics/ChBce.cuh" //for FsiGeneralData #include "chrono_fsi/physics/ChSphGeneral.cuh" #include <type_traits> namespace chrono { namespace fsi { //-------------------------------------------------------------------------------------------------------------------------------- __device__ double atomicAdd_double(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Populate_RigidSPH_MeshPos_LRF_kernel(Real3* rigidSPH_MeshPos_LRF_D, Real4* posRadD, uint* rigidIdentifierD, Real3* posRigidD, Real4* qD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numRigidMarkers) { return; } int rigidIndex = rigidIdentifierD[index]; uint rigidMarkerIndex = index + numObjectsD.startRigidMarkers; Real4 q4 = qD[rigidIndex]; Real3 a1, a2, a3; RotationMatirixFromQuaternion(a1, a2, a3, q4); Real3 dist3 = mR3(posRadD[rigidMarkerIndex]) - posRigidD[rigidIndex]; Real3 dist3LF = InverseRotate_By_RotationMatrix_DeviceHost(a1, a2, a3, dist3); rigidSPH_MeshPos_LRF_D[index] = dist3LF; } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Populate_FlexSPH_MeshPos_LRF_kernel(Real3* FlexSPH_MeshPos_LRF_D, Real3* FlexSPH_MeshPos_LRF_H, Real4* posRadD, uint* FlexIdentifierD, const int numFlex1D, uint2* CableElementsNodes, uint4* ShellElementsNodes, Real3* pos_fsi_fea_D, Real Spacing) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numFlexMarkers) { return; } int FlexIndex = FlexIdentifierD[index]; uint FlexMarkerIndex = index + numObjectsD.startFlexMarkers; if (FlexIndex < numFlex1D) { uint2 cableNodes = CableElementsNodes[FlexIndex]; Real3 pos_fsi_fea_D_nA = pos_fsi_fea_D[cableNodes.x]; Real3 pos_fsi_fea_D_nB = pos_fsi_fea_D[cableNodes.y]; Real3 dist3 = mR3(posRadD[FlexMarkerIndex]) - pos_fsi_fea_D_nA; Real3 x_dir = pos_fsi_fea_D_nB - pos_fsi_fea_D_nA; Real Cable_x = length(x_dir); x_dir = x_dir / length(x_dir); Real norm_dir_length = length(cross(dist3, x_dir)); Real3 y_dir = mR3(-x_dir.y, x_dir.x, 0) + mR3(-x_dir.z, 0, x_dir.x) + mR3(0, -x_dir.z, x_dir.y); y_dir = y_dir / length(y_dir); Real3 z_dir = cross(x_dir, y_dir); Real dx = dot(dist3, x_dir); Real dy = 
dot(dist3, y_dir); Real dz = dot(dist3, z_dir); if (abs(dy) > 0) dy /= Spacing; if (abs(dz) > 0) dz /= Spacing; FlexSPH_MeshPos_LRF_D[index] = mR3(dx / Cable_x, dy, dz); } if (FlexIndex >= numFlex1D) { uint4 shellNodes = ShellElementsNodes[FlexIndex - numFlex1D]; Real3 pos_fsi_fea_D_nA = pos_fsi_fea_D[shellNodes.x]; Real3 pos_fsi_fea_D_nB = pos_fsi_fea_D[shellNodes.y]; Real3 pos_fsi_fea_D_nC = pos_fsi_fea_D[shellNodes.z]; Real3 pos_fsi_fea_D_nD = pos_fsi_fea_D[shellNodes.w]; Real3 Shell_center = 0.25 * (pos_fsi_fea_D_nA + pos_fsi_fea_D_nB + pos_fsi_fea_D_nC + pos_fsi_fea_D_nD); Real Shell_x = 0.25 * length(pos_fsi_fea_D_nB - pos_fsi_fea_D_nA + pos_fsi_fea_D_nC - pos_fsi_fea_D_nD); Real Shell_y = 0.25 * length(pos_fsi_fea_D_nD - pos_fsi_fea_D_nA + pos_fsi_fea_D_nC - pos_fsi_fea_D_nB); Real3 dist3 = mR3(posRadD[FlexMarkerIndex]) - Shell_center; Real3 physic_to_natural = mR3(1.0 / Shell_x, 1.0 / Shell_y, 1); Real3 pos_physical = FlexSPH_MeshPos_LRF_H[index]; Real3 pos_natural = mR3(pos_physical.x * physic_to_natural.x, pos_physical.y * physic_to_natural.y, pos_physical.z * physic_to_natural.z); Real3 n1 = normalize(cross(pos_fsi_fea_D_nB - pos_fsi_fea_D_nA, pos_fsi_fea_D_nC - pos_fsi_fea_D_nB)); Real3 n2 = normalize(cross(pos_fsi_fea_D_nC - pos_fsi_fea_D_nB, pos_fsi_fea_D_nD - pos_fsi_fea_D_nC)); Real3 n3 = normalize(cross(pos_fsi_fea_D_nD - pos_fsi_fea_D_nC, pos_fsi_fea_D_nA - pos_fsi_fea_D_nD)); Real3 n4 = normalize(cross(pos_fsi_fea_D_nA - pos_fsi_fea_D_nD, pos_fsi_fea_D_nB - pos_fsi_fea_D_nA)); Real3 Normal = normalize(n1 + n2 + n3 + n4); Real zSide = dot(Normal, dist3) / Spacing; FlexSPH_MeshPos_LRF_D[index] = FlexSPH_MeshPos_LRF_H[index]; } } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Calc_Rigid_FSI_ForcesD_TorquesD(Real3* rigid_FSI_ForcesD, Real3* rigid_FSI_TorquesD, Real4* derivVelRhoD, Real4* derivVelRhoD_old, Real4* posRadD, uint* rigidIdentifierD, Real3* posRigidD, Real3* rigidSPH_MeshPos_LRF_D, Real4* qD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numRigidMarkers) { return; } int RigidIndex = rigidIdentifierD[index]; uint rigidMarkerIndex = index + numObjectsD.startRigidMarkers; derivVelRhoD[rigidMarkerIndex] = (derivVelRhoD[rigidMarkerIndex] * paramsD.Beta + derivVelRhoD_old[rigidMarkerIndex] * (1 - paramsD.Beta)) * paramsD.markerMass; if (std::is_same<Real, double>::value) { atomicAdd_double((double*)&(rigid_FSI_ForcesD[RigidIndex].x), derivVelRhoD[rigidMarkerIndex].x); atomicAdd_double((double*)&(rigid_FSI_ForcesD[RigidIndex].y), derivVelRhoD[rigidMarkerIndex].y); atomicAdd_double((double*)&(rigid_FSI_ForcesD[RigidIndex].z), derivVelRhoD[rigidMarkerIndex].z); } else { atomicAdd((float*)&(rigid_FSI_ForcesD[RigidIndex].x), derivVelRhoD[rigidMarkerIndex].x); atomicAdd((float*)&(rigid_FSI_ForcesD[RigidIndex].y), derivVelRhoD[rigidMarkerIndex].y); atomicAdd((float*)&(rigid_FSI_ForcesD[RigidIndex].z), derivVelRhoD[rigidMarkerIndex].z); } Real3 dist3 = Distance(mR3(posRadD[rigidMarkerIndex]), posRigidD[RigidIndex]); Real3 mtorque = cross(dist3, mR3(derivVelRhoD[rigidMarkerIndex])); if (std::is_same<Real, double>::value) { atomicAdd_double((double*)&(rigid_FSI_TorquesD[RigidIndex].x), mtorque.x); atomicAdd_double((double*)&(rigid_FSI_TorquesD[RigidIndex].y), mtorque.y); atomicAdd_double((double*)&(rigid_FSI_TorquesD[RigidIndex].z), mtorque.z); } else { atomicAdd((float*)&(rigid_FSI_TorquesD[RigidIndex].x), mtorque.x); 
atomicAdd((float*)&(rigid_FSI_TorquesD[RigidIndex].y), mtorque.y); atomicAdd((float*)&(rigid_FSI_TorquesD[RigidIndex].z), mtorque.z); } } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Calc_Flex_FSI_ForcesD(Real3* FlexSPH_MeshPos_LRF_D, uint* FlexIdentifierD, const int numFlex1D, uint2* CableElementsNodes, // This is the connectivity of FEA mesh. uint4* ShellElementsNodes, // This is the connectivity of FEA mesh. Real4* derivVelRhoD, Real4* derivVelRhoD_old, Real3* pos_fsi_fea_D, Real3* Flex_FSI_ForcesD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numFlexMarkers) { return; } int FlexIndex = FlexIdentifierD[index]; uint FlexMarkerIndex = index + numObjectsD.startFlexMarkers; derivVelRhoD[FlexMarkerIndex] = (derivVelRhoD[FlexMarkerIndex] * paramsD.Beta + derivVelRhoD_old[FlexMarkerIndex] * (1 - paramsD.Beta)) * paramsD.markerMass; if (FlexIndex < numFlex1D) { Real2 N_cable = Cables_ShapeFunctions(FlexSPH_MeshPos_LRF_D[index].x); Real NA = N_cable.x; Real NB = N_cable.y; int nA = CableElementsNodes[FlexIndex].x; int nB = CableElementsNodes[FlexIndex].y; if (std::is_same<Real, double>::value) { atomicAdd_double((double*)&(Flex_FSI_ForcesD[nA].x), NA * derivVelRhoD[FlexMarkerIndex].x); atomicAdd_double((double*)&(Flex_FSI_ForcesD[nA].y), NA * derivVelRhoD[FlexMarkerIndex].y); atomicAdd_double((double*)&(Flex_FSI_ForcesD[nA].z), NA * derivVelRhoD[FlexMarkerIndex].z); atomicAdd_double((double*)&(Flex_FSI_ForcesD[nB].x), NB * derivVelRhoD[FlexMarkerIndex].x); atomicAdd_double((double*)&(Flex_FSI_ForcesD[nB].y), NB * derivVelRhoD[FlexMarkerIndex].y); atomicAdd_double((double*)&(Flex_FSI_ForcesD[nB].z), NB * derivVelRhoD[FlexMarkerIndex].z); } else { atomicAdd((float*)&(Flex_FSI_ForcesD[nA].x), NA * derivVelRhoD[FlexMarkerIndex].x); atomicAdd((float*)&(Flex_FSI_ForcesD[nA].y), NA * derivVelRhoD[FlexMarkerIndex].y); atomicAdd((float*)&(Flex_FSI_ForcesD[nA].z), NA * derivVelRhoD[FlexMarkerIndex].z); atomicAdd((float*)&(Flex_FSI_ForcesD[nB].x), NB * derivVelRhoD[FlexMarkerIndex].x); atomicAdd((float*)&(Flex_FSI_ForcesD[nB].y), NB * derivVelRhoD[FlexMarkerIndex].y); atomicAdd((float*)&(Flex_FSI_ForcesD[nB].z), NB * derivVelRhoD[FlexMarkerIndex].z); } } if (FlexIndex >= numFlex1D) { Real4 N_shell = Shells_ShapeFunctions(FlexSPH_MeshPos_LRF_D[index].x, FlexSPH_MeshPos_LRF_D[index].y); Real NA = N_shell.x; Real NB = N_shell.y; Real NC = N_shell.z; Real ND = N_shell.w; int nA = ShellElementsNodes[FlexIndex - numFlex1D].x; int nB = ShellElementsNodes[FlexIndex - numFlex1D].y; int nC = ShellElementsNodes[FlexIndex - numFlex1D].z; int nD = ShellElementsNodes[FlexIndex - numFlex1D].w; if (std::is_same<Real, double>::value) { atomicAdd_double((double*)&(Flex_FSI_ForcesD[nA].x), NA * derivVelRhoD[FlexMarkerIndex].x); atomicAdd_double((double*)&(Flex_FSI_ForcesD[nA].y), NA * derivVelRhoD[FlexMarkerIndex].y); atomicAdd_double((double*)&(Flex_FSI_ForcesD[nA].z), NA * derivVelRhoD[FlexMarkerIndex].z); atomicAdd_double((double*)&(Flex_FSI_ForcesD[nB].x), NB * derivVelRhoD[FlexMarkerIndex].x); atomicAdd_double((double*)&(Flex_FSI_ForcesD[nB].y), NB * derivVelRhoD[FlexMarkerIndex].y); atomicAdd_double((double*)&(Flex_FSI_ForcesD[nB].z), NB * derivVelRhoD[FlexMarkerIndex].z); atomicAdd_double((double*)&(Flex_FSI_ForcesD[nC].x), NC * derivVelRhoD[FlexMarkerIndex].x); atomicAdd_double((double*)&(Flex_FSI_ForcesD[nC].y), NC * derivVelRhoD[FlexMarkerIndex].y); 
atomicAdd_double((double*)&(Flex_FSI_ForcesD[nC].z), NC * derivVelRhoD[FlexMarkerIndex].z); atomicAdd_double((double*)&(Flex_FSI_ForcesD[nD].x), ND * derivVelRhoD[FlexMarkerIndex].x); atomicAdd_double((double*)&(Flex_FSI_ForcesD[nD].y), ND * derivVelRhoD[FlexMarkerIndex].y); atomicAdd_double((double*)&(Flex_FSI_ForcesD[nD].z), ND * derivVelRhoD[FlexMarkerIndex].z); } else { atomicAdd((float*)&(Flex_FSI_ForcesD[nA].x), NA * derivVelRhoD[FlexMarkerIndex].x); atomicAdd((float*)&(Flex_FSI_ForcesD[nA].y), NA * derivVelRhoD[FlexMarkerIndex].y); atomicAdd((float*)&(Flex_FSI_ForcesD[nA].z), NA * derivVelRhoD[FlexMarkerIndex].z); atomicAdd((float*)&(Flex_FSI_ForcesD[nB].x), NB * derivVelRhoD[FlexMarkerIndex].x); atomicAdd((float*)&(Flex_FSI_ForcesD[nB].y), NB * derivVelRhoD[FlexMarkerIndex].y); atomicAdd((float*)&(Flex_FSI_ForcesD[nB].z), NB * derivVelRhoD[FlexMarkerIndex].z); atomicAdd((float*)&(Flex_FSI_ForcesD[nC].x), NC * derivVelRhoD[FlexMarkerIndex].x); atomicAdd((float*)&(Flex_FSI_ForcesD[nC].y), NC * derivVelRhoD[FlexMarkerIndex].y); atomicAdd((float*)&(Flex_FSI_ForcesD[nC].z), NC * derivVelRhoD[FlexMarkerIndex].z); atomicAdd((float*)&(Flex_FSI_ForcesD[nD].x), ND * derivVelRhoD[FlexMarkerIndex].x); atomicAdd((float*)&(Flex_FSI_ForcesD[nD].y), ND * derivVelRhoD[FlexMarkerIndex].y); atomicAdd((float*)&(Flex_FSI_ForcesD[nD].z), ND * derivVelRhoD[FlexMarkerIndex].z); } } } //-------------------------------------------------------------------------------------------------------------------------------- // collide a particle against all other particles in a given cell __device__ void BCE_modification_Share(Real3& sumVW, Real3& sumRhoRW, Real& sumPW, Real& sumWFluid, int& isAffectedV, int& isAffectedP, int3 gridPos, Real3 posRadA, Real4* sortedPosRad, Real3* sortedVelMas, Real4* sortedRhoPreMu, uint* cellStart, uint* cellEnd) { uint gridHash = calcGridHash(gridPos); // get start of bucket for this cell uint startIndex = cellStart[gridHash]; uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 posRadB = mR3(sortedPosRad[j]); Real3 dist3 = Distance(posRadA, posRadB); Real dd = dist3.x * dist3.x + dist3.y * dist3.y + dist3.z * dist3.z; Real4 rhoPresMuB = sortedRhoPreMu[j]; if (dd > RESOLUTION_LENGTH_MULT * paramsD.HSML * RESOLUTION_LENGTH_MULT * paramsD.HSML || rhoPresMuB.w > -0.5) continue; Real d = length(dist3); Real Wd = W3h(d, sortedPosRad[j].w); Real3 velMasB = sortedVelMas[j]; sumVW += velMasB * Wd; sumRhoRW += rhoPresMuB.x * dist3 * Wd; sumPW += rhoPresMuB.y * Wd; sumWFluid += Wd; } } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void new_BCE_VelocityPressure(Real4* velMassRigid_fsiBodies_D, uint* rigidIdentifierD, Real3* velMas_ModifiedBCE, Real4* rhoPreMu_ModifiedBCE, Real4* sortedPosRad, // input: sorted positions Real3* sortedVelMas, // input: sorted velocities Real4* sortedRhoPreMu, uint* cellStart, uint* cellEnd, uint* mapOriginalToSorted, Real3* bceAcc, int2 newPortion, volatile bool* isErrorD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; uint sphIndex = index + newPortion.x; if (index >= newPortion.y - newPortion.x) { return; } uint bceIndex = index; if (paramsD.bceTypeWall == BceVersion::ORIGINAL) bceIndex = index + numObjectsD.numBoundaryMarkers; uint idA = mapOriginalToSorted[sphIndex]; Real4 rhoPreMuA = sortedRhoPreMu[idA]; Real3 posRadA = mR3(sortedPosRad[idA]); Real3 velMasA = sortedVelMas[idA]; int isAffectedV = 0; int isAffectedP = 0; 
Real3 sumVW = mR3(0); Real3 sumRhoRW = mR3(0); Real sumPW = 0; Real sumWFluid = 0; // get address in grid int3 gridPos = calcGridPos(posRadA); // examine neighbouring cells for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); BCE_modification_Share(sumVW, sumRhoRW, sumPW, sumWFluid, isAffectedV, isAffectedP, neighbourPos, posRadA, sortedPosRad, sortedVelMas, sortedRhoPreMu, cellStart, cellEnd); } } } if (abs(sumWFluid) > EPSILON) { // modify velocity Real3 modifiedBCE_v = 2 * velMasA - sumVW / sumWFluid; velMas_ModifiedBCE[bceIndex] = modifiedBCE_v; // modify pressure Real3 aW = mR3(0.0); if (rhoPreMuA.w > 0.5 && rhoPreMuA.w < 1.5) { // only need acceleration of rigid body's BCE particle int rigidBceIndex = sphIndex - numObjectsD.startRigidMarkers; if (rigidBceIndex < 0 || rigidBceIndex >= numObjectsD.numRigidMarkers) { printf( "Error! particle index out of bound: thrown from " "ChBce.cu, new_BCE_VelocityPressure !\n"); *isErrorD = true; return; } aW = bceAcc[rigidBceIndex]; } Real pressure = (sumPW + dot(paramsD.gravity - aW, sumRhoRW)) / sumWFluid; Real density = InvEos(pressure); rhoPreMu_ModifiedBCE[bceIndex] = mR4(density, pressure, rhoPreMuA.z, rhoPreMuA.w); } else { rhoPreMu_ModifiedBCE[bceIndex] = mR4(paramsD.rho0, paramsD.BASEPRES, paramsD.mu0, rhoPreMuA.w); velMas_ModifiedBCE[bceIndex] = mR3(0.0); } sortedVelMas[idA] = velMas_ModifiedBCE[bceIndex]; sortedRhoPreMu[idA] = rhoPreMu_ModifiedBCE[bceIndex]; } //-------------------------------------------------------------------------------------------------------------------------------- // calculate BCE particle's acceleration, required in ADAMI __global__ void calcBceAcceleration_kernel(Real3* bceAcc, Real4* q_fsiBodies_D, Real3* accRigid_fsiBodies_D, Real3* omegaVelLRF_fsiBodies_D, Real3* omegaAccLRF_fsiBodies_D, Real3* rigidSPH_MeshPos_LRF_D, const uint* rigidIdentifierD) { uint bceIndex = blockIdx.x * blockDim.x + threadIdx.x; if (bceIndex >= numObjectsD.numRigidMarkers) { return; } int rigidBodyIndex = rigidIdentifierD[bceIndex]; Real3 acc3 = accRigid_fsiBodies_D[rigidBodyIndex]; // linear acceleration (CM) Real4 q4 = q_fsiBodies_D[rigidBodyIndex]; Real3 a1, a2, a3; RotationMatirixFromQuaternion(a1, a2, a3, q4); Real3 wVel3 = omegaVelLRF_fsiBodies_D[rigidBodyIndex]; Real3 rigidSPH_MeshPos_LRF = rigidSPH_MeshPos_LRF_D[bceIndex]; Real3 wVelCrossS = cross(wVel3, rigidSPH_MeshPos_LRF); Real3 wVelCrossWVelCrossS = cross(wVel3, wVelCrossS); acc3 += mR3(dot(a1, wVelCrossWVelCrossS), dot(a2, wVelCrossWVelCrossS), dot(a3, wVelCrossWVelCrossS)); // centrigugal acceleration Real3 wAcc3 = omegaAccLRF_fsiBodies_D[rigidBodyIndex]; Real3 wAccCrossS = cross(wAcc3, rigidSPH_MeshPos_LRF); acc3 += mR3(dot(a1, wAccCrossS), dot(a2, wAccCrossS), dot(a3, wAccCrossS)); // tangential acceleration bceAcc[bceIndex] = acc3; } //-------------------------------------------------------------------------------------------------------------------------------- // updates the rigid body particles __global__ void UpdateRigidMarkersPositionVelocityD(Real4* posRadD, Real3* velMasD, Real3* rigidSPH_MeshPos_LRF_D, uint* rigidIdentifierD, Real3* posRigidD, Real4* velMassRigidD, Real3* omegaLRF_D, Real4* qD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numRigidMarkers) { return; } uint rigidMarkerIndex = index + numObjectsD.startRigidMarkers; int rigidBodyIndex = rigidIdentifierD[index]; Real4 q4 = qD[rigidBodyIndex]; Real3 a1, a2, a3; 
RotationMatirixFromQuaternion(a1, a2, a3, q4); Real3 rigidSPH_MeshPos_LRF = rigidSPH_MeshPos_LRF_D[index]; // position Real h = posRadD[rigidMarkerIndex].w; Real3 p_Rigid = posRigidD[rigidBodyIndex]; Real3 pos = p_Rigid + mR3(dot(a1, rigidSPH_MeshPos_LRF), dot(a2, rigidSPH_MeshPos_LRF), dot(a3, rigidSPH_MeshPos_LRF)); posRadD[rigidMarkerIndex] = mR4(pos, h); // velocity Real4 vM_Rigid = velMassRigidD[rigidBodyIndex]; Real3 omega3 = omegaLRF_D[rigidBodyIndex]; Real3 omegaCrossS = cross(omega3, rigidSPH_MeshPos_LRF); velMasD[rigidMarkerIndex] = mR3(vM_Rigid) + mR3(dot(a1, omegaCrossS), dot(a2, omegaCrossS), dot(a3, omegaCrossS)); } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void UpdateFlexMarkersPositionVelocityAccD(Real4* posRadD, Real3* FlexSPH_MeshPos_LRF_D, Real3* velMasD, const uint* FlexIdentifierD, const int numFlex1D, uint2* CableElementsNodes, uint4* ShellelementsNodes, Real3* pos_fsi_fea_D, Real3* vel_fsi_fea_D, Real Spacing) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numFlexMarkers) { return; } int FlexIndex = FlexIdentifierD[index]; uint FlexMarkerIndex = index + numObjectsD.startFlexMarkers; if (FlexIndex < numFlex1D) { uint2 CableNodes = CableElementsNodes[FlexIndex]; Real3 pos_fsi_fea_D_nA = pos_fsi_fea_D[CableNodes.x]; Real3 pos_fsi_fea_D_nB = pos_fsi_fea_D[CableNodes.y]; Real3 x_dir = pos_fsi_fea_D_nB - pos_fsi_fea_D_nA; Real L = length(x_dir); x_dir = x_dir / length(x_dir); Real3 y_dir = mR3(-x_dir.y, x_dir.x, 0) + mR3(-x_dir.z, 0, x_dir.x) + mR3(0, -x_dir.z, x_dir.y); y_dir = y_dir / length(y_dir); Real3 z_dir = cross(x_dir, y_dir); Real2 N_cable = Cables_ShapeFunctions(FlexSPH_MeshPos_LRF_D[index].x); Real NA = N_cable.x; Real NB = N_cable.y; Real3 vel_fsi_fea_D_nA = vel_fsi_fea_D[CableNodes.x]; Real3 vel_fsi_fea_D_nB = vel_fsi_fea_D[CableNodes.y]; Real3 physic_to_natural = mR3(1 / L, 1, 1); Real3 pos_natural = mR3(FlexSPH_MeshPos_LRF_D[index].x * physic_to_natural.x, FlexSPH_MeshPos_LRF_D[index].y * physic_to_natural.y, FlexSPH_MeshPos_LRF_D[index].z * physic_to_natural.z); Real2 Nnew = Cables_ShapeFunctions(FlexSPH_MeshPos_LRF_D[index].x); Real h = posRadD[FlexMarkerIndex].w; Real3 tempPos = Nnew.x * pos_fsi_fea_D_nA + Nnew.y * pos_fsi_fea_D_nB + FlexSPH_MeshPos_LRF_D[index].y * y_dir * Spacing + FlexSPH_MeshPos_LRF_D[index].z * z_dir * Spacing; posRadD[FlexMarkerIndex] = mR4(tempPos, h); velMasD[FlexMarkerIndex] = NA * vel_fsi_fea_D_nA + NB * vel_fsi_fea_D_nB; } if (FlexIndex >= numFlex1D) { uint4 shellNodes = ShellelementsNodes[FlexIndex - numFlex1D]; Real3 pos_fsi_fea_D_nA = pos_fsi_fea_D[shellNodes.x]; Real3 pos_fsi_fea_D_nB = pos_fsi_fea_D[shellNodes.y]; Real3 pos_fsi_fea_D_nC = pos_fsi_fea_D[shellNodes.z]; Real3 pos_fsi_fea_D_nD = pos_fsi_fea_D[shellNodes.w]; Real3 Shell_center = 0.25 * (pos_fsi_fea_D_nA + pos_fsi_fea_D_nB + pos_fsi_fea_D_nC + pos_fsi_fea_D_nD); Real3 dist3 = mR3(posRadD[FlexMarkerIndex]) - Shell_center; Real3 x_dir = ((pos_fsi_fea_D_nB - pos_fsi_fea_D_nA) + (pos_fsi_fea_D_nC - pos_fsi_fea_D_nD)); Real3 n1 = normalize(cross(pos_fsi_fea_D_nB - pos_fsi_fea_D_nA, pos_fsi_fea_D_nC - pos_fsi_fea_D_nB)); Real3 n2 = normalize(cross(pos_fsi_fea_D_nC - pos_fsi_fea_D_nB, pos_fsi_fea_D_nD - pos_fsi_fea_D_nC)); Real3 n3 = normalize(cross(pos_fsi_fea_D_nD - pos_fsi_fea_D_nC, pos_fsi_fea_D_nA - pos_fsi_fea_D_nD)); Real3 n4 = normalize(cross(pos_fsi_fea_D_nA - pos_fsi_fea_D_nD, pos_fsi_fea_D_nB - pos_fsi_fea_D_nA)); Real3 Normal 
= normalize(n1 + n2 + n3 + n4); Real3 y_dir = cross(Normal, x_dir); Real Shell_x = 0.25 * length(pos_fsi_fea_D_nB - pos_fsi_fea_D_nA + pos_fsi_fea_D_nC - pos_fsi_fea_D_nD); Real Shell_y = 0.25 * length(pos_fsi_fea_D_nD - pos_fsi_fea_D_nA + pos_fsi_fea_D_nC - pos_fsi_fea_D_nB); Real3 physic_to_natural = mR3(1 / Shell_x, 1 / Shell_y, 1); Real3 pos_physical = dist3; Real3 pos_natural = mR3(pos_physical.x * physic_to_natural.x, pos_physical.y * physic_to_natural.y, pos_physical.z * physic_to_natural.z); Real4 N_shell = Shells_ShapeFunctions(FlexSPH_MeshPos_LRF_D[index].x, FlexSPH_MeshPos_LRF_D[index].y); Real NA = N_shell.x; Real NB = N_shell.y; Real NC = N_shell.z; Real ND = N_shell.w; Real3 vel_fsi_fea_D_nA = vel_fsi_fea_D[shellNodes.x]; Real3 vel_fsi_fea_D_nB = vel_fsi_fea_D[shellNodes.y]; Real3 vel_fsi_fea_D_nC = vel_fsi_fea_D[shellNodes.z]; Real3 vel_fsi_fea_D_nD = vel_fsi_fea_D[shellNodes.w]; Real h = posRadD[FlexMarkerIndex].w; Real3 tempPos = NA * pos_fsi_fea_D_nA + NB * pos_fsi_fea_D_nB + NC * pos_fsi_fea_D_nC + ND * pos_fsi_fea_D_nD + Normal * FlexSPH_MeshPos_LRF_D[index].z * Spacing; posRadD[FlexMarkerIndex] = mR4(tempPos, h); velMasD[FlexMarkerIndex] = NA * vel_fsi_fea_D_nA + NB * vel_fsi_fea_D_nB + NC * vel_fsi_fea_D_nC + ND * vel_fsi_fea_D_nD; } } //-------------------------------------------------------------------------------------------------------------------------------- ChBce::ChBce(std::shared_ptr<SphMarkerDataD> otherSortedSphMarkersD, std::shared_ptr<ProximityDataD> otherMarkersProximityD, std::shared_ptr<FsiGeneralData> otherFsiGeneralData, std::shared_ptr<SimParams> otherParamsH, std::shared_ptr<ChCounters> otherNumObjects, bool verb) : sortedSphMarkersD(otherSortedSphMarkersD), markersProximityD(otherMarkersProximityD), fsiGeneralData(otherFsiGeneralData), paramsH(otherParamsH), numObjectsH(otherNumObjects), verbose(verb) { totalSurfaceInteractionRigid4.resize(0); torqueMarkersD.resize(0); dummyIdentify.resize(0); } ChBce::~ChBce() {} //-------------------------------------------------------------------------------------------------------------------------------- void ChBce::Initialize(std::shared_ptr<SphMarkerDataD> sphMarkersD, std::shared_ptr<FsiBodiesDataD> fsiBodiesD, std::shared_ptr<FsiMeshDataD> fsiMeshD, std::vector<int> fsiBodyBceNum, std::vector<int> fsiShellBceNum, std::vector<int> fsiCableBceNum) { hipMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams)); hipMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(ChCounters)); CopyParams_NumberOfObjects(paramsH, numObjectsH); totalSurfaceInteractionRigid4.resize(numObjectsH->numRigidBodies); dummyIdentify.resize(numObjectsH->numRigidBodies); torqueMarkersD.resize(numObjectsH->numRigidMarkers); // Resizing the arrays used to modify the BCE velocity and pressure according to ADAMI int haveGhost = (numObjectsH->numGhostMarkers > 0) ? 1 : 0; int haveHelper = (numObjectsH->numHelperMarkers > 0) ? 1 : 0; int haveRigid = (numObjectsH->numRigidBodies > 0) ? 1 : 0; int haveFlex1D = (numObjectsH->numFlexBodies1D > 0) ? 1 : 0; int haveFlex2D = (numObjectsH->numFlexBodies2D > 0) ? 
1 : 0; int numFlexAndRigidAndBoundaryMarkers = fsiGeneralData->referenceArray[2 + haveHelper + haveGhost + haveRigid + haveFlex1D + haveFlex2D - 1].y - fsiGeneralData->referenceArray[haveHelper + haveGhost].y; if (verbose) { printf("Total number of BCE particles = %d\n", numFlexAndRigidAndBoundaryMarkers); if (paramsH->bceType == BceVersion::ADAMI) { printf("Boundary condition for rigid body is: ADAMI\n"); } if (paramsH->bceType == BceVersion::ORIGINAL) { printf("Boundary condition for rigid body is: ORIGINAL\n"); } if (paramsH->bceTypeWall == BceVersion::ADAMI) { printf("Boundary condition for fixed wall is: ADAMI\n"); } if (paramsH->bceTypeWall == BceVersion::ORIGINAL) { printf("Boundary condition for fixed wall is: ORIGINAL\n"); } } if ((numObjectsH->numBoundaryMarkers + numObjectsH->numRigidMarkers + numObjectsH->numFlexMarkers) != numFlexAndRigidAndBoundaryMarkers) { throw std::runtime_error("Error! number of flex and rigid and boundary markers are saved incorrectly!\n"); } velMas_ModifiedBCE.resize(numFlexAndRigidAndBoundaryMarkers); rhoPreMu_ModifiedBCE.resize(numFlexAndRigidAndBoundaryMarkers); // Populate local position of BCE markers if (haveRigid) Populate_RigidSPH_MeshPos_LRF(sphMarkersD, fsiBodiesD, fsiBodyBceNum); if (haveFlex1D || haveFlex2D) Populate_FlexSPH_MeshPos_LRF(sphMarkersD, fsiMeshD, fsiShellBceNum, fsiCableBceNum); } void ChBce::Populate_RigidSPH_MeshPos_LRF(std::shared_ptr<SphMarkerDataD> sphMarkersD, std::shared_ptr<FsiBodiesDataD> fsiBodiesD, std::vector<int> fsiBodyBceNum) { // Create map between a BCE on a rigid body and the associated body ID { uint start_bce = 0; for (int irigid = 0; irigid < fsiBodyBceNum.size(); irigid++) { uint end_bce = start_bce + fsiBodyBceNum[irigid]; thrust::fill(fsiGeneralData->rigidIdentifierD.begin() + start_bce, fsiGeneralData->rigidIdentifierD.begin() + end_bce, irigid); start_bce = end_bce; } } uint nBlocks_numRigid_SphMarkers; uint nThreads_SphMarkers; computeGridSize((uint)numObjectsH->numRigidMarkers, 256, nBlocks_numRigid_SphMarkers, nThreads_SphMarkers); hipLaunchKernelGGL(( Populate_RigidSPH_MeshPos_LRF_kernel), dim3(nBlocks_numRigid_SphMarkers), dim3(nThreads_SphMarkers), 0, 0, mR3CAST(fsiGeneralData->rigidSPH_MeshPos_LRF_D), mR4CAST(sphMarkersD->posRadD), U1CAST(fsiGeneralData->rigidIdentifierD), mR3CAST(fsiBodiesD->posRigid_fsiBodies_D), mR4CAST(fsiBodiesD->q_fsiBodies_D)); hipDeviceSynchronize(); cudaCheckError(); UpdateRigidMarkersPositionVelocity(sphMarkersD, fsiBodiesD); } void ChBce::Populate_FlexSPH_MeshPos_LRF(std::shared_ptr<SphMarkerDataD> sphMarkersD, std::shared_ptr<FsiMeshDataD> fsiMeshD, std::vector<int> fsiShellBceNum, std::vector<int> fsiCableBceNum) { // Create map between a BCE on a flex body and the associated flex body ID { uint start_bce = 0; for (uint icable = 0; icable < fsiCableBceNum.size(); icable++) { uint end_bce = start_bce + fsiCableBceNum[icable]; thrust::fill(fsiGeneralData->FlexIdentifierD.begin() + start_bce, fsiGeneralData->FlexIdentifierD.begin() + end_bce, icable); start_bce = end_bce; } for (uint ishell = 0; ishell < fsiShellBceNum.size(); ishell++) { uint end_bce = start_bce + fsiShellBceNum[ishell]; thrust::fill(fsiGeneralData->FlexIdentifierD.begin() + start_bce, fsiGeneralData->FlexIdentifierD.begin() + end_bce, ishell + fsiCableBceNum.size()); start_bce = end_bce; } } #if 0 for (uint i = 0; i < fsiGeneralData->FlexIdentifierD.size(); i++) std::cout << i << " " << fsiGeneralData->FlexIdentifierD[i] << std::endl; #endif uint nBlocks_numFlex_SphMarkers; uint nThreads_SphMarkers; 
computeGridSize((uint)numObjectsH->numFlexMarkers, 256, nBlocks_numFlex_SphMarkers, nThreads_SphMarkers); thrust::device_vector<Real3> FlexSPH_MeshPos_LRF_H = fsiGeneralData->FlexSPH_MeshPos_LRF_H; hipLaunchKernelGGL(( Populate_FlexSPH_MeshPos_LRF_kernel), dim3(nBlocks_numFlex_SphMarkers), dim3(nThreads_SphMarkers), 0, 0, mR3CAST(fsiGeneralData->FlexSPH_MeshPos_LRF_D), mR3CAST(FlexSPH_MeshPos_LRF_H), mR4CAST(sphMarkersD->posRadD), U1CAST(fsiGeneralData->FlexIdentifierD), (int)numObjectsH->numFlexBodies1D, U2CAST(fsiGeneralData->CableElementsNodes), U4CAST(fsiGeneralData->ShellElementsNodes), mR3CAST(fsiMeshD->pos_fsi_fea_D), paramsH->HSML * paramsH->MULT_INITSPACE_Shells); hipDeviceSynchronize(); cudaCheckError(); UpdateFlexMarkersPositionVelocity(sphMarkersD, fsiMeshD); } //-------------------------------------------------------------------------------------------------------------------------------- void ChBce::RecalcSortedVelocityPressure_BCE(std::shared_ptr<FsiBodiesDataD> fsiBodiesD, thrust::device_vector<Real3>& velMas_ModifiedBCE, thrust::device_vector<Real4>& rhoPreMu_ModifiedBCE, const thrust::device_vector<Real4>& sortedPosRad, const thrust::device_vector<Real3>& sortedVelMas, const thrust::device_vector<Real4>& sortedRhoPreMu, const thrust::device_vector<uint>& cellStart, const thrust::device_vector<uint>& cellEnd, const thrust::device_vector<uint>& mapOriginalToSorted, const thrust::device_vector<Real3>& bceAcc, int3 updatePortion) { bool *isErrorH, *isErrorD; isErrorH = (bool*)malloc(sizeof(bool)); hipMalloc((void**)&isErrorD, sizeof(bool)); *isErrorH = false; hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice); // thread per particle uint numThreads, numBlocks; int2 newPortion = mI2(updatePortion.x, updatePortion.z); if (paramsH->bceTypeWall == BceVersion::ORIGINAL) { // Only implement ADAMI BC for rigid body boundary. // Implement a simple BC for fixed wall to avoid unnecessary cost. newPortion = mI2(updatePortion.y, updatePortion.z); } uint numBCE = newPortion.y - newPortion.x; computeGridSize(numBCE, 256, numBlocks, numThreads); hipLaunchKernelGGL(( new_BCE_VelocityPressure), dim3(numBlocks), dim3(numThreads), 0, 0, mR4CAST(fsiBodiesD->velMassRigid_fsiBodies_D), U1CAST(fsiGeneralData->rigidIdentifierD), mR3CAST(velMas_ModifiedBCE), mR4CAST(rhoPreMu_ModifiedBCE), mR4CAST(sortedPosRad), mR3CAST(sortedVelMas), mR4CAST(sortedRhoPreMu), U1CAST(cellStart), U1CAST(cellEnd), U1CAST(mapOriginalToSorted), mR3CAST(bceAcc), newPortion, isErrorD); hipDeviceSynchronize(); cudaCheckError() //------------------------------------------------------------------------ hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! 
program crashed in new_BCE_VelocityPressure!\n"); } hipFree(isErrorD); free(isErrorH); } //-------------------------------------------------------------------------------------------------------------------------------- void ChBce::CalcBceAcceleration(thrust::device_vector<Real3>& bceAcc, const thrust::device_vector<Real4>& q_fsiBodies_D, const thrust::device_vector<Real3>& accRigid_fsiBodies_D, const thrust::device_vector<Real3>& omegaVelLRF_fsiBodies_D, const thrust::device_vector<Real3>& omegaAccLRF_fsiBodies_D, const thrust::device_vector<Real3>& rigidSPH_MeshPos_LRF_D, const thrust::device_vector<uint>& rigidIdentifierD, int numRigidMarkers) { // thread per particle uint numThreads, numBlocks; computeGridSize(numRigidMarkers, 256, numBlocks, numThreads); hipLaunchKernelGGL(( calcBceAcceleration_kernel), dim3(numBlocks), dim3(numThreads), 0, 0, mR3CAST(bceAcc), mR4CAST(q_fsiBodies_D), mR3CAST(accRigid_fsiBodies_D), mR3CAST(omegaVelLRF_fsiBodies_D), mR3CAST(omegaAccLRF_fsiBodies_D), mR3CAST(rigidSPH_MeshPos_LRF_D), U1CAST(rigidIdentifierD)); hipDeviceSynchronize(); cudaCheckError(); } //-------------------------------------------------------------------------------------------------------------------------------- void ChBce::ModifyBceVelocity(std::shared_ptr<SphMarkerDataD> sphMarkersD, std::shared_ptr<FsiBodiesDataD> fsiBodiesD) { // modify BCE velocity and pressure int numRigidAndBoundaryMarkers = fsiGeneralData->referenceArray[2].y - fsiGeneralData->referenceArray[0].y; if (numObjectsH->numRigidBodies == 0) numRigidAndBoundaryMarkers = fsiGeneralData->referenceArray[1].y - fsiGeneralData->referenceArray[0].y; if ((numObjectsH->numBoundaryMarkers + numObjectsH->numRigidMarkers) != numRigidAndBoundaryMarkers) { throw std::runtime_error( "Error! number of rigid and boundary markers are " "saved incorrectly. Thrown from ModifyBceVelocity!\n"); } if (!(velMas_ModifiedBCE.size() == numRigidAndBoundaryMarkers && rhoPreMu_ModifiedBCE.size() == numRigidAndBoundaryMarkers)) { throw std::runtime_error( "Error! size error velMas_ModifiedBCE and " "rhoPreMu_ModifiedBCE. 
Thrown from ModifyBceVelocity!\n"); } int3 updatePortion = mI3(fsiGeneralData->referenceArray[0].y, fsiGeneralData->referenceArray[1].y, fsiGeneralData->referenceArray[2].y); if (numObjectsH->numRigidBodies == 0) updatePortion.z = fsiGeneralData->referenceArray[1].y; if (paramsH->bceType == BceVersion::ADAMI) { thrust::device_vector<Real3> bceAcc(numObjectsH->numRigidMarkers); if (numObjectsH->numRigidMarkers > 0) { CalcBceAcceleration(bceAcc, fsiBodiesD->q_fsiBodies_D, fsiBodiesD->accRigid_fsiBodies_D, fsiBodiesD->omegaVelLRF_fsiBodies_D, fsiBodiesD->omegaAccLRF_fsiBodies_D, fsiGeneralData->rigidSPH_MeshPos_LRF_D, fsiGeneralData->rigidIdentifierD, (int)numObjectsH->numRigidMarkers); } // ADAMI BC for rigid body, ORIGINAL BC for fixed wall if (paramsH->bceTypeWall == BceVersion::ORIGINAL) { thrust::copy(sphMarkersD->velMasD.begin() + updatePortion.x, sphMarkersD->velMasD.begin() + updatePortion.y, velMas_ModifiedBCE.begin()); thrust::copy(sphMarkersD->rhoPresMuD.begin() + updatePortion.x, sphMarkersD->rhoPresMuD.begin() + updatePortion.y, rhoPreMu_ModifiedBCE.begin()); if (numObjectsH->numRigidMarkers > 0) { RecalcSortedVelocityPressure_BCE( fsiBodiesD, velMas_ModifiedBCE, rhoPreMu_ModifiedBCE, sortedSphMarkersD->posRadD, sortedSphMarkersD->velMasD, sortedSphMarkersD->rhoPresMuD, markersProximityD->cellStartD, markersProximityD->cellEndD, markersProximityD->mapOriginalToSorted, bceAcc, updatePortion); } } // ADAMI BC for both rigid body and fixed wall else if (paramsH->bceTypeWall == BceVersion::ADAMI) { RecalcSortedVelocityPressure_BCE( fsiBodiesD, velMas_ModifiedBCE, rhoPreMu_ModifiedBCE, sortedSphMarkersD->posRadD, sortedSphMarkersD->velMasD, sortedSphMarkersD->rhoPresMuD, markersProximityD->cellStartD, markersProximityD->cellEndD, markersProximityD->mapOriginalToSorted, bceAcc, updatePortion); } bceAcc.clear(); } else { thrust::copy(sphMarkersD->velMasD.begin() + updatePortion.x, sphMarkersD->velMasD.begin() + updatePortion.z, velMas_ModifiedBCE.begin()); thrust::copy(sphMarkersD->rhoPresMuD.begin() + updatePortion.x, sphMarkersD->rhoPresMuD.begin() + updatePortion.z, rhoPreMu_ModifiedBCE.begin()); } } //-------------------------------------------------------------------------------------------------------------------------------- void ChBce::Rigid_Forces_Torques(std::shared_ptr<SphMarkerDataD> sphMarkersD, std::shared_ptr<FsiBodiesDataD> fsiBodiesD) { if (numObjectsH->numRigidBodies == 0) { return; } thrust::fill(fsiGeneralData->rigid_FSI_ForcesD.begin(), fsiGeneralData->rigid_FSI_ForcesD.end(), mR3(0)); thrust::fill(fsiGeneralData->rigid_FSI_TorquesD.begin(), fsiGeneralData->rigid_FSI_TorquesD.end(), mR3(0)); uint nBlocks_numRigid_SphMarkers; uint nThreads_SphMarkers; computeGridSize((uint)numObjectsH->numRigidMarkers, 256, nBlocks_numRigid_SphMarkers, nThreads_SphMarkers); hipLaunchKernelGGL(( Calc_Rigid_FSI_ForcesD_TorquesD), dim3(nBlocks_numRigid_SphMarkers), dim3(nThreads_SphMarkers), 0, 0, mR3CAST(fsiGeneralData->rigid_FSI_ForcesD), mR3CAST(fsiGeneralData->rigid_FSI_TorquesD), mR4CAST(fsiGeneralData->derivVelRhoD), mR4CAST(fsiGeneralData->derivVelRhoD_old), mR4CAST(sphMarkersD->posRadD), U1CAST(fsiGeneralData->rigidIdentifierD), mR3CAST(fsiBodiesD->posRigid_fsiBodies_D), mR3CAST(fsiGeneralData->rigidSPH_MeshPos_LRF_D), mR4CAST(fsiBodiesD->q_fsiBodies_D)); hipDeviceSynchronize(); cudaCheckError(); } //-------------------------------------------------------------------------------------------------------------------------------- void 
ChBce::Flex_Forces(std::shared_ptr<SphMarkerDataD> sphMarkersD, std::shared_ptr<FsiMeshDataD> fsiMeshD) { if ((numObjectsH->numFlexBodies1D + numObjectsH->numFlexBodies2D) == 0) { return; } thrust::fill(fsiGeneralData->Flex_FSI_ForcesD.begin(), fsiGeneralData->Flex_FSI_ForcesD.end(), mR3(0)); uint nBlocks_numFlex_SphMarkers; uint nThreads_SphMarkers; computeGridSize((int)numObjectsH->numFlexMarkers, 256, nBlocks_numFlex_SphMarkers, nThreads_SphMarkers); hipLaunchKernelGGL(( Calc_Flex_FSI_ForcesD), dim3(nBlocks_numFlex_SphMarkers), dim3(nThreads_SphMarkers), 0, 0, mR3CAST(fsiGeneralData->FlexSPH_MeshPos_LRF_D), U1CAST(fsiGeneralData->FlexIdentifierD), (int)numObjectsH->numFlexBodies1D, U2CAST(fsiGeneralData->CableElementsNodes), U4CAST(fsiGeneralData->ShellElementsNodes), mR4CAST(fsiGeneralData->derivVelRhoD), mR4CAST(fsiGeneralData->derivVelRhoD_old), mR3CAST(fsiMeshD->pos_fsi_fea_D), mR3CAST(fsiGeneralData->Flex_FSI_ForcesD)); hipDeviceSynchronize(); cudaCheckError(); } //-------------------------------------------------------------------------------------------------------------------------------- void ChBce::UpdateRigidMarkersPositionVelocity(std::shared_ptr<SphMarkerDataD> sphMarkersD, std::shared_ptr<FsiBodiesDataD> fsiBodiesD) { if (numObjectsH->numRigidBodies == 0) { return; } uint nBlocks_numRigid_SphMarkers; uint nThreads_SphMarkers; computeGridSize((int)numObjectsH->numRigidMarkers, 256, nBlocks_numRigid_SphMarkers, nThreads_SphMarkers); hipLaunchKernelGGL(( UpdateRigidMarkersPositionVelocityD), dim3(nBlocks_numRigid_SphMarkers), dim3(nThreads_SphMarkers), 0, 0, mR4CAST(sphMarkersD->posRadD), mR3CAST(sphMarkersD->velMasD), mR3CAST(fsiGeneralData->rigidSPH_MeshPos_LRF_D), U1CAST(fsiGeneralData->rigidIdentifierD), mR3CAST(fsiBodiesD->posRigid_fsiBodies_D), mR4CAST(fsiBodiesD->velMassRigid_fsiBodies_D), mR3CAST(fsiBodiesD->omegaVelLRF_fsiBodies_D), mR4CAST(fsiBodiesD->q_fsiBodies_D)); hipDeviceSynchronize(); cudaCheckError(); } //-------------------------------------------------------------------------------------------------------------------------------- void ChBce::UpdateFlexMarkersPositionVelocity(std::shared_ptr<SphMarkerDataD> sphMarkersD, std::shared_ptr<FsiMeshDataD> fsiMeshD) { if ((numObjectsH->numFlexBodies1D + numObjectsH->numFlexBodies2D) == 0) { return; } uint nBlocks_numFlex_SphMarkers; uint nThreads_SphMarkers; printf("UpdateFlexMarkersPositionVelocity..\n"); computeGridSize((int)numObjectsH->numFlexMarkers, 256, nBlocks_numFlex_SphMarkers, nThreads_SphMarkers); hipLaunchKernelGGL(( UpdateFlexMarkersPositionVelocityAccD), dim3(nBlocks_numFlex_SphMarkers), dim3(nThreads_SphMarkers), 0, 0, mR4CAST(sphMarkersD->posRadD), mR3CAST(fsiGeneralData->FlexSPH_MeshPos_LRF_D), mR3CAST(sphMarkersD->velMasD), U1CAST(fsiGeneralData->FlexIdentifierD), (int)numObjectsH->numFlexBodies1D, U2CAST(fsiGeneralData->CableElementsNodes), U4CAST(fsiGeneralData->ShellElementsNodes), mR3CAST(fsiMeshD->pos_fsi_fea_D), mR3CAST(fsiMeshD->vel_fsi_fea_D), paramsH->HSML * paramsH->MULT_INITSPACE_Shells); hipDeviceSynchronize(); cudaCheckError(); } } // end namespace fsi } // end namespace chrono
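// ----------------------------------------------------------------------------
// In the ChBce file above, hipify has rewritten every kernel launch into the
// hipLaunchKernelGGL macro where the original CUDA source would use the
// <<<grid, block>>> launch syntax. A minimal sketch of that rewrite, assuming
// a HIP toolchain; scaleKernel and launchScale are hypothetical names used
// only for illustration and do not appear in the files of this record:
#include <hip/hip_runtime.h>
__global__ void scaleKernel(float* data, float s, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        data[i] *= s;                       // simple element-wise scaling
}
void launchScale(float* d_data, float s, int n) {
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    // CUDA form:  scaleKernel<<<grid, block>>>(d_data, s, n);
    // HIP form produced by hipify (dynamic shared memory = 0, default stream = 0):
    hipLaunchKernelGGL(scaleKernel, grid, block, 0, 0, d_data, s, n);
    hipDeviceSynchronize();
}
// ----------------------------------------------------------------------------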
74fe3c70e041e23095b4c82bfb379a63cd244a64.cu
// ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2014 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Author: Arman Pazouki, Milad Rakhsha, Wei Hu // ============================================================================= // // Base class for processing boundary condition enforcing (bce) markers forces // in FSI system. // ============================================================================= #include "chrono_fsi/physics/ChBce.cuh" //for FsiGeneralData #include "chrono_fsi/physics/ChSphGeneral.cuh" #include <type_traits> namespace chrono { namespace fsi { //-------------------------------------------------------------------------------------------------------------------------------- __device__ double atomicAdd_double(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Populate_RigidSPH_MeshPos_LRF_kernel(Real3* rigidSPH_MeshPos_LRF_D, Real4* posRadD, uint* rigidIdentifierD, Real3* posRigidD, Real4* qD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numRigidMarkers) { return; } int rigidIndex = rigidIdentifierD[index]; uint rigidMarkerIndex = index + numObjectsD.startRigidMarkers; Real4 q4 = qD[rigidIndex]; Real3 a1, a2, a3; RotationMatirixFromQuaternion(a1, a2, a3, q4); Real3 dist3 = mR3(posRadD[rigidMarkerIndex]) - posRigidD[rigidIndex]; Real3 dist3LF = InverseRotate_By_RotationMatrix_DeviceHost(a1, a2, a3, dist3); rigidSPH_MeshPos_LRF_D[index] = dist3LF; } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Populate_FlexSPH_MeshPos_LRF_kernel(Real3* FlexSPH_MeshPos_LRF_D, Real3* FlexSPH_MeshPos_LRF_H, Real4* posRadD, uint* FlexIdentifierD, const int numFlex1D, uint2* CableElementsNodes, uint4* ShellElementsNodes, Real3* pos_fsi_fea_D, Real Spacing) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numFlexMarkers) { return; } int FlexIndex = FlexIdentifierD[index]; uint FlexMarkerIndex = index + numObjectsD.startFlexMarkers; if (FlexIndex < numFlex1D) { uint2 cableNodes = CableElementsNodes[FlexIndex]; Real3 pos_fsi_fea_D_nA = pos_fsi_fea_D[cableNodes.x]; Real3 pos_fsi_fea_D_nB = pos_fsi_fea_D[cableNodes.y]; Real3 dist3 = mR3(posRadD[FlexMarkerIndex]) - pos_fsi_fea_D_nA; Real3 x_dir = pos_fsi_fea_D_nB - pos_fsi_fea_D_nA; Real Cable_x = length(x_dir); x_dir = x_dir / length(x_dir); Real norm_dir_length = length(cross(dist3, x_dir)); Real3 y_dir = mR3(-x_dir.y, x_dir.x, 0) + mR3(-x_dir.z, 0, x_dir.x) + mR3(0, -x_dir.z, x_dir.y); y_dir = y_dir / length(y_dir); Real3 z_dir = cross(x_dir, y_dir); Real dx = dot(dist3, x_dir); Real dy = dot(dist3, y_dir); Real dz = dot(dist3, z_dir); if (abs(dy) > 0) dy /= Spacing; if (abs(dz) > 
0) dz /= Spacing; FlexSPH_MeshPos_LRF_D[index] = mR3(dx / Cable_x, dy, dz); } if (FlexIndex >= numFlex1D) { uint4 shellNodes = ShellElementsNodes[FlexIndex - numFlex1D]; Real3 pos_fsi_fea_D_nA = pos_fsi_fea_D[shellNodes.x]; Real3 pos_fsi_fea_D_nB = pos_fsi_fea_D[shellNodes.y]; Real3 pos_fsi_fea_D_nC = pos_fsi_fea_D[shellNodes.z]; Real3 pos_fsi_fea_D_nD = pos_fsi_fea_D[shellNodes.w]; Real3 Shell_center = 0.25 * (pos_fsi_fea_D_nA + pos_fsi_fea_D_nB + pos_fsi_fea_D_nC + pos_fsi_fea_D_nD); Real Shell_x = 0.25 * length(pos_fsi_fea_D_nB - pos_fsi_fea_D_nA + pos_fsi_fea_D_nC - pos_fsi_fea_D_nD); Real Shell_y = 0.25 * length(pos_fsi_fea_D_nD - pos_fsi_fea_D_nA + pos_fsi_fea_D_nC - pos_fsi_fea_D_nB); Real3 dist3 = mR3(posRadD[FlexMarkerIndex]) - Shell_center; Real3 physic_to_natural = mR3(1.0 / Shell_x, 1.0 / Shell_y, 1); Real3 pos_physical = FlexSPH_MeshPos_LRF_H[index]; Real3 pos_natural = mR3(pos_physical.x * physic_to_natural.x, pos_physical.y * physic_to_natural.y, pos_physical.z * physic_to_natural.z); Real3 n1 = normalize(cross(pos_fsi_fea_D_nB - pos_fsi_fea_D_nA, pos_fsi_fea_D_nC - pos_fsi_fea_D_nB)); Real3 n2 = normalize(cross(pos_fsi_fea_D_nC - pos_fsi_fea_D_nB, pos_fsi_fea_D_nD - pos_fsi_fea_D_nC)); Real3 n3 = normalize(cross(pos_fsi_fea_D_nD - pos_fsi_fea_D_nC, pos_fsi_fea_D_nA - pos_fsi_fea_D_nD)); Real3 n4 = normalize(cross(pos_fsi_fea_D_nA - pos_fsi_fea_D_nD, pos_fsi_fea_D_nB - pos_fsi_fea_D_nA)); Real3 Normal = normalize(n1 + n2 + n3 + n4); Real zSide = dot(Normal, dist3) / Spacing; FlexSPH_MeshPos_LRF_D[index] = FlexSPH_MeshPos_LRF_H[index]; } } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Calc_Rigid_FSI_ForcesD_TorquesD(Real3* rigid_FSI_ForcesD, Real3* rigid_FSI_TorquesD, Real4* derivVelRhoD, Real4* derivVelRhoD_old, Real4* posRadD, uint* rigidIdentifierD, Real3* posRigidD, Real3* rigidSPH_MeshPos_LRF_D, Real4* qD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numRigidMarkers) { return; } int RigidIndex = rigidIdentifierD[index]; uint rigidMarkerIndex = index + numObjectsD.startRigidMarkers; derivVelRhoD[rigidMarkerIndex] = (derivVelRhoD[rigidMarkerIndex] * paramsD.Beta + derivVelRhoD_old[rigidMarkerIndex] * (1 - paramsD.Beta)) * paramsD.markerMass; if (std::is_same<Real, double>::value) { atomicAdd_double((double*)&(rigid_FSI_ForcesD[RigidIndex].x), derivVelRhoD[rigidMarkerIndex].x); atomicAdd_double((double*)&(rigid_FSI_ForcesD[RigidIndex].y), derivVelRhoD[rigidMarkerIndex].y); atomicAdd_double((double*)&(rigid_FSI_ForcesD[RigidIndex].z), derivVelRhoD[rigidMarkerIndex].z); } else { atomicAdd((float*)&(rigid_FSI_ForcesD[RigidIndex].x), derivVelRhoD[rigidMarkerIndex].x); atomicAdd((float*)&(rigid_FSI_ForcesD[RigidIndex].y), derivVelRhoD[rigidMarkerIndex].y); atomicAdd((float*)&(rigid_FSI_ForcesD[RigidIndex].z), derivVelRhoD[rigidMarkerIndex].z); } Real3 dist3 = Distance(mR3(posRadD[rigidMarkerIndex]), posRigidD[RigidIndex]); Real3 mtorque = cross(dist3, mR3(derivVelRhoD[rigidMarkerIndex])); if (std::is_same<Real, double>::value) { atomicAdd_double((double*)&(rigid_FSI_TorquesD[RigidIndex].x), mtorque.x); atomicAdd_double((double*)&(rigid_FSI_TorquesD[RigidIndex].y), mtorque.y); atomicAdd_double((double*)&(rigid_FSI_TorquesD[RigidIndex].z), mtorque.z); } else { atomicAdd((float*)&(rigid_FSI_TorquesD[RigidIndex].x), mtorque.x); atomicAdd((float*)&(rigid_FSI_TorquesD[RigidIndex].y), mtorque.y); atomicAdd((float*)&(rigid_FSI_TorquesD[RigidIndex].z), 
mtorque.z); } } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void Calc_Flex_FSI_ForcesD(Real3* FlexSPH_MeshPos_LRF_D, uint* FlexIdentifierD, const int numFlex1D, uint2* CableElementsNodes, // This is the connectivity of FEA mesh. uint4* ShellElementsNodes, // This is the connectivity of FEA mesh. Real4* derivVelRhoD, Real4* derivVelRhoD_old, Real3* pos_fsi_fea_D, Real3* Flex_FSI_ForcesD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numFlexMarkers) { return; } int FlexIndex = FlexIdentifierD[index]; uint FlexMarkerIndex = index + numObjectsD.startFlexMarkers; derivVelRhoD[FlexMarkerIndex] = (derivVelRhoD[FlexMarkerIndex] * paramsD.Beta + derivVelRhoD_old[FlexMarkerIndex] * (1 - paramsD.Beta)) * paramsD.markerMass; if (FlexIndex < numFlex1D) { Real2 N_cable = Cables_ShapeFunctions(FlexSPH_MeshPos_LRF_D[index].x); Real NA = N_cable.x; Real NB = N_cable.y; int nA = CableElementsNodes[FlexIndex].x; int nB = CableElementsNodes[FlexIndex].y; if (std::is_same<Real, double>::value) { atomicAdd_double((double*)&(Flex_FSI_ForcesD[nA].x), NA * derivVelRhoD[FlexMarkerIndex].x); atomicAdd_double((double*)&(Flex_FSI_ForcesD[nA].y), NA * derivVelRhoD[FlexMarkerIndex].y); atomicAdd_double((double*)&(Flex_FSI_ForcesD[nA].z), NA * derivVelRhoD[FlexMarkerIndex].z); atomicAdd_double((double*)&(Flex_FSI_ForcesD[nB].x), NB * derivVelRhoD[FlexMarkerIndex].x); atomicAdd_double((double*)&(Flex_FSI_ForcesD[nB].y), NB * derivVelRhoD[FlexMarkerIndex].y); atomicAdd_double((double*)&(Flex_FSI_ForcesD[nB].z), NB * derivVelRhoD[FlexMarkerIndex].z); } else { atomicAdd((float*)&(Flex_FSI_ForcesD[nA].x), NA * derivVelRhoD[FlexMarkerIndex].x); atomicAdd((float*)&(Flex_FSI_ForcesD[nA].y), NA * derivVelRhoD[FlexMarkerIndex].y); atomicAdd((float*)&(Flex_FSI_ForcesD[nA].z), NA * derivVelRhoD[FlexMarkerIndex].z); atomicAdd((float*)&(Flex_FSI_ForcesD[nB].x), NB * derivVelRhoD[FlexMarkerIndex].x); atomicAdd((float*)&(Flex_FSI_ForcesD[nB].y), NB * derivVelRhoD[FlexMarkerIndex].y); atomicAdd((float*)&(Flex_FSI_ForcesD[nB].z), NB * derivVelRhoD[FlexMarkerIndex].z); } } if (FlexIndex >= numFlex1D) { Real4 N_shell = Shells_ShapeFunctions(FlexSPH_MeshPos_LRF_D[index].x, FlexSPH_MeshPos_LRF_D[index].y); Real NA = N_shell.x; Real NB = N_shell.y; Real NC = N_shell.z; Real ND = N_shell.w; int nA = ShellElementsNodes[FlexIndex - numFlex1D].x; int nB = ShellElementsNodes[FlexIndex - numFlex1D].y; int nC = ShellElementsNodes[FlexIndex - numFlex1D].z; int nD = ShellElementsNodes[FlexIndex - numFlex1D].w; if (std::is_same<Real, double>::value) { atomicAdd_double((double*)&(Flex_FSI_ForcesD[nA].x), NA * derivVelRhoD[FlexMarkerIndex].x); atomicAdd_double((double*)&(Flex_FSI_ForcesD[nA].y), NA * derivVelRhoD[FlexMarkerIndex].y); atomicAdd_double((double*)&(Flex_FSI_ForcesD[nA].z), NA * derivVelRhoD[FlexMarkerIndex].z); atomicAdd_double((double*)&(Flex_FSI_ForcesD[nB].x), NB * derivVelRhoD[FlexMarkerIndex].x); atomicAdd_double((double*)&(Flex_FSI_ForcesD[nB].y), NB * derivVelRhoD[FlexMarkerIndex].y); atomicAdd_double((double*)&(Flex_FSI_ForcesD[nB].z), NB * derivVelRhoD[FlexMarkerIndex].z); atomicAdd_double((double*)&(Flex_FSI_ForcesD[nC].x), NC * derivVelRhoD[FlexMarkerIndex].x); atomicAdd_double((double*)&(Flex_FSI_ForcesD[nC].y), NC * derivVelRhoD[FlexMarkerIndex].y); atomicAdd_double((double*)&(Flex_FSI_ForcesD[nC].z), NC * derivVelRhoD[FlexMarkerIndex].z); atomicAdd_double((double*)&(Flex_FSI_ForcesD[nD].x), ND * 
derivVelRhoD[FlexMarkerIndex].x); atomicAdd_double((double*)&(Flex_FSI_ForcesD[nD].y), ND * derivVelRhoD[FlexMarkerIndex].y); atomicAdd_double((double*)&(Flex_FSI_ForcesD[nD].z), ND * derivVelRhoD[FlexMarkerIndex].z); } else { atomicAdd((float*)&(Flex_FSI_ForcesD[nA].x), NA * derivVelRhoD[FlexMarkerIndex].x); atomicAdd((float*)&(Flex_FSI_ForcesD[nA].y), NA * derivVelRhoD[FlexMarkerIndex].y); atomicAdd((float*)&(Flex_FSI_ForcesD[nA].z), NA * derivVelRhoD[FlexMarkerIndex].z); atomicAdd((float*)&(Flex_FSI_ForcesD[nB].x), NB * derivVelRhoD[FlexMarkerIndex].x); atomicAdd((float*)&(Flex_FSI_ForcesD[nB].y), NB * derivVelRhoD[FlexMarkerIndex].y); atomicAdd((float*)&(Flex_FSI_ForcesD[nB].z), NB * derivVelRhoD[FlexMarkerIndex].z); atomicAdd((float*)&(Flex_FSI_ForcesD[nC].x), NC * derivVelRhoD[FlexMarkerIndex].x); atomicAdd((float*)&(Flex_FSI_ForcesD[nC].y), NC * derivVelRhoD[FlexMarkerIndex].y); atomicAdd((float*)&(Flex_FSI_ForcesD[nC].z), NC * derivVelRhoD[FlexMarkerIndex].z); atomicAdd((float*)&(Flex_FSI_ForcesD[nD].x), ND * derivVelRhoD[FlexMarkerIndex].x); atomicAdd((float*)&(Flex_FSI_ForcesD[nD].y), ND * derivVelRhoD[FlexMarkerIndex].y); atomicAdd((float*)&(Flex_FSI_ForcesD[nD].z), ND * derivVelRhoD[FlexMarkerIndex].z); } } } //-------------------------------------------------------------------------------------------------------------------------------- // collide a particle against all other particles in a given cell __device__ void BCE_modification_Share(Real3& sumVW, Real3& sumRhoRW, Real& sumPW, Real& sumWFluid, int& isAffectedV, int& isAffectedP, int3 gridPos, Real3 posRadA, Real4* sortedPosRad, Real3* sortedVelMas, Real4* sortedRhoPreMu, uint* cellStart, uint* cellEnd) { uint gridHash = calcGridHash(gridPos); // get start of bucket for this cell uint startIndex = cellStart[gridHash]; uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 posRadB = mR3(sortedPosRad[j]); Real3 dist3 = Distance(posRadA, posRadB); Real dd = dist3.x * dist3.x + dist3.y * dist3.y + dist3.z * dist3.z; Real4 rhoPresMuB = sortedRhoPreMu[j]; if (dd > RESOLUTION_LENGTH_MULT * paramsD.HSML * RESOLUTION_LENGTH_MULT * paramsD.HSML || rhoPresMuB.w > -0.5) continue; Real d = length(dist3); Real Wd = W3h(d, sortedPosRad[j].w); Real3 velMasB = sortedVelMas[j]; sumVW += velMasB * Wd; sumRhoRW += rhoPresMuB.x * dist3 * Wd; sumPW += rhoPresMuB.y * Wd; sumWFluid += Wd; } } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void new_BCE_VelocityPressure(Real4* velMassRigid_fsiBodies_D, uint* rigidIdentifierD, Real3* velMas_ModifiedBCE, Real4* rhoPreMu_ModifiedBCE, Real4* sortedPosRad, // input: sorted positions Real3* sortedVelMas, // input: sorted velocities Real4* sortedRhoPreMu, uint* cellStart, uint* cellEnd, uint* mapOriginalToSorted, Real3* bceAcc, int2 newPortion, volatile bool* isErrorD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; uint sphIndex = index + newPortion.x; if (index >= newPortion.y - newPortion.x) { return; } uint bceIndex = index; if (paramsD.bceTypeWall == BceVersion::ORIGINAL) bceIndex = index + numObjectsD.numBoundaryMarkers; uint idA = mapOriginalToSorted[sphIndex]; Real4 rhoPreMuA = sortedRhoPreMu[idA]; Real3 posRadA = mR3(sortedPosRad[idA]); Real3 velMasA = sortedVelMas[idA]; int isAffectedV = 0; int isAffectedP = 0; Real3 sumVW = mR3(0); Real3 sumRhoRW = mR3(0); Real sumPW = 0; Real sumWFluid = 0; // get address in grid int3 gridPos = calcGridPos(posRadA); // examine 
neighbouring cells for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); BCE_modification_Share(sumVW, sumRhoRW, sumPW, sumWFluid, isAffectedV, isAffectedP, neighbourPos, posRadA, sortedPosRad, sortedVelMas, sortedRhoPreMu, cellStart, cellEnd); } } } if (abs(sumWFluid) > EPSILON) { // modify velocity Real3 modifiedBCE_v = 2 * velMasA - sumVW / sumWFluid; velMas_ModifiedBCE[bceIndex] = modifiedBCE_v; // modify pressure Real3 aW = mR3(0.0); if (rhoPreMuA.w > 0.5 && rhoPreMuA.w < 1.5) { // only need acceleration of rigid body's BCE particle int rigidBceIndex = sphIndex - numObjectsD.startRigidMarkers; if (rigidBceIndex < 0 || rigidBceIndex >= numObjectsD.numRigidMarkers) { printf( "Error! particle index out of bound: thrown from " "ChBce.cu, new_BCE_VelocityPressure !\n"); *isErrorD = true; return; } aW = bceAcc[rigidBceIndex]; } Real pressure = (sumPW + dot(paramsD.gravity - aW, sumRhoRW)) / sumWFluid; Real density = InvEos(pressure); rhoPreMu_ModifiedBCE[bceIndex] = mR4(density, pressure, rhoPreMuA.z, rhoPreMuA.w); } else { rhoPreMu_ModifiedBCE[bceIndex] = mR4(paramsD.rho0, paramsD.BASEPRES, paramsD.mu0, rhoPreMuA.w); velMas_ModifiedBCE[bceIndex] = mR3(0.0); } sortedVelMas[idA] = velMas_ModifiedBCE[bceIndex]; sortedRhoPreMu[idA] = rhoPreMu_ModifiedBCE[bceIndex]; } //-------------------------------------------------------------------------------------------------------------------------------- // calculate BCE particle's acceleration, required in ADAMI __global__ void calcBceAcceleration_kernel(Real3* bceAcc, Real4* q_fsiBodies_D, Real3* accRigid_fsiBodies_D, Real3* omegaVelLRF_fsiBodies_D, Real3* omegaAccLRF_fsiBodies_D, Real3* rigidSPH_MeshPos_LRF_D, const uint* rigidIdentifierD) { uint bceIndex = blockIdx.x * blockDim.x + threadIdx.x; if (bceIndex >= numObjectsD.numRigidMarkers) { return; } int rigidBodyIndex = rigidIdentifierD[bceIndex]; Real3 acc3 = accRigid_fsiBodies_D[rigidBodyIndex]; // linear acceleration (CM) Real4 q4 = q_fsiBodies_D[rigidBodyIndex]; Real3 a1, a2, a3; RotationMatirixFromQuaternion(a1, a2, a3, q4); Real3 wVel3 = omegaVelLRF_fsiBodies_D[rigidBodyIndex]; Real3 rigidSPH_MeshPos_LRF = rigidSPH_MeshPos_LRF_D[bceIndex]; Real3 wVelCrossS = cross(wVel3, rigidSPH_MeshPos_LRF); Real3 wVelCrossWVelCrossS = cross(wVel3, wVelCrossS); acc3 += mR3(dot(a1, wVelCrossWVelCrossS), dot(a2, wVelCrossWVelCrossS), dot(a3, wVelCrossWVelCrossS)); // centrigugal acceleration Real3 wAcc3 = omegaAccLRF_fsiBodies_D[rigidBodyIndex]; Real3 wAccCrossS = cross(wAcc3, rigidSPH_MeshPos_LRF); acc3 += mR3(dot(a1, wAccCrossS), dot(a2, wAccCrossS), dot(a3, wAccCrossS)); // tangential acceleration bceAcc[bceIndex] = acc3; } //-------------------------------------------------------------------------------------------------------------------------------- // updates the rigid body particles __global__ void UpdateRigidMarkersPositionVelocityD(Real4* posRadD, Real3* velMasD, Real3* rigidSPH_MeshPos_LRF_D, uint* rigidIdentifierD, Real3* posRigidD, Real4* velMassRigidD, Real3* omegaLRF_D, Real4* qD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numRigidMarkers) { return; } uint rigidMarkerIndex = index + numObjectsD.startRigidMarkers; int rigidBodyIndex = rigidIdentifierD[index]; Real4 q4 = qD[rigidBodyIndex]; Real3 a1, a2, a3; RotationMatirixFromQuaternion(a1, a2, a3, q4); Real3 rigidSPH_MeshPos_LRF = rigidSPH_MeshPos_LRF_D[index]; // position Real h = posRadD[rigidMarkerIndex].w; Real3 p_Rigid 
= posRigidD[rigidBodyIndex]; Real3 pos = p_Rigid + mR3(dot(a1, rigidSPH_MeshPos_LRF), dot(a2, rigidSPH_MeshPos_LRF), dot(a3, rigidSPH_MeshPos_LRF)); posRadD[rigidMarkerIndex] = mR4(pos, h); // velocity Real4 vM_Rigid = velMassRigidD[rigidBodyIndex]; Real3 omega3 = omegaLRF_D[rigidBodyIndex]; Real3 omegaCrossS = cross(omega3, rigidSPH_MeshPos_LRF); velMasD[rigidMarkerIndex] = mR3(vM_Rigid) + mR3(dot(a1, omegaCrossS), dot(a2, omegaCrossS), dot(a3, omegaCrossS)); } //-------------------------------------------------------------------------------------------------------------------------------- __global__ void UpdateFlexMarkersPositionVelocityAccD(Real4* posRadD, Real3* FlexSPH_MeshPos_LRF_D, Real3* velMasD, const uint* FlexIdentifierD, const int numFlex1D, uint2* CableElementsNodes, uint4* ShellelementsNodes, Real3* pos_fsi_fea_D, Real3* vel_fsi_fea_D, Real Spacing) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numFlexMarkers) { return; } int FlexIndex = FlexIdentifierD[index]; uint FlexMarkerIndex = index + numObjectsD.startFlexMarkers; if (FlexIndex < numFlex1D) { uint2 CableNodes = CableElementsNodes[FlexIndex]; Real3 pos_fsi_fea_D_nA = pos_fsi_fea_D[CableNodes.x]; Real3 pos_fsi_fea_D_nB = pos_fsi_fea_D[CableNodes.y]; Real3 x_dir = pos_fsi_fea_D_nB - pos_fsi_fea_D_nA; Real L = length(x_dir); x_dir = x_dir / length(x_dir); Real3 y_dir = mR3(-x_dir.y, x_dir.x, 0) + mR3(-x_dir.z, 0, x_dir.x) + mR3(0, -x_dir.z, x_dir.y); y_dir = y_dir / length(y_dir); Real3 z_dir = cross(x_dir, y_dir); Real2 N_cable = Cables_ShapeFunctions(FlexSPH_MeshPos_LRF_D[index].x); Real NA = N_cable.x; Real NB = N_cable.y; Real3 vel_fsi_fea_D_nA = vel_fsi_fea_D[CableNodes.x]; Real3 vel_fsi_fea_D_nB = vel_fsi_fea_D[CableNodes.y]; Real3 physic_to_natural = mR3(1 / L, 1, 1); Real3 pos_natural = mR3(FlexSPH_MeshPos_LRF_D[index].x * physic_to_natural.x, FlexSPH_MeshPos_LRF_D[index].y * physic_to_natural.y, FlexSPH_MeshPos_LRF_D[index].z * physic_to_natural.z); Real2 Nnew = Cables_ShapeFunctions(FlexSPH_MeshPos_LRF_D[index].x); Real h = posRadD[FlexMarkerIndex].w; Real3 tempPos = Nnew.x * pos_fsi_fea_D_nA + Nnew.y * pos_fsi_fea_D_nB + FlexSPH_MeshPos_LRF_D[index].y * y_dir * Spacing + FlexSPH_MeshPos_LRF_D[index].z * z_dir * Spacing; posRadD[FlexMarkerIndex] = mR4(tempPos, h); velMasD[FlexMarkerIndex] = NA * vel_fsi_fea_D_nA + NB * vel_fsi_fea_D_nB; } if (FlexIndex >= numFlex1D) { uint4 shellNodes = ShellelementsNodes[FlexIndex - numFlex1D]; Real3 pos_fsi_fea_D_nA = pos_fsi_fea_D[shellNodes.x]; Real3 pos_fsi_fea_D_nB = pos_fsi_fea_D[shellNodes.y]; Real3 pos_fsi_fea_D_nC = pos_fsi_fea_D[shellNodes.z]; Real3 pos_fsi_fea_D_nD = pos_fsi_fea_D[shellNodes.w]; Real3 Shell_center = 0.25 * (pos_fsi_fea_D_nA + pos_fsi_fea_D_nB + pos_fsi_fea_D_nC + pos_fsi_fea_D_nD); Real3 dist3 = mR3(posRadD[FlexMarkerIndex]) - Shell_center; Real3 x_dir = ((pos_fsi_fea_D_nB - pos_fsi_fea_D_nA) + (pos_fsi_fea_D_nC - pos_fsi_fea_D_nD)); Real3 n1 = normalize(cross(pos_fsi_fea_D_nB - pos_fsi_fea_D_nA, pos_fsi_fea_D_nC - pos_fsi_fea_D_nB)); Real3 n2 = normalize(cross(pos_fsi_fea_D_nC - pos_fsi_fea_D_nB, pos_fsi_fea_D_nD - pos_fsi_fea_D_nC)); Real3 n3 = normalize(cross(pos_fsi_fea_D_nD - pos_fsi_fea_D_nC, pos_fsi_fea_D_nA - pos_fsi_fea_D_nD)); Real3 n4 = normalize(cross(pos_fsi_fea_D_nA - pos_fsi_fea_D_nD, pos_fsi_fea_D_nB - pos_fsi_fea_D_nA)); Real3 Normal = normalize(n1 + n2 + n3 + n4); Real3 y_dir = cross(Normal, x_dir); Real Shell_x = 0.25 * length(pos_fsi_fea_D_nB - pos_fsi_fea_D_nA + pos_fsi_fea_D_nC - 
pos_fsi_fea_D_nD); Real Shell_y = 0.25 * length(pos_fsi_fea_D_nD - pos_fsi_fea_D_nA + pos_fsi_fea_D_nC - pos_fsi_fea_D_nB); Real3 physic_to_natural = mR3(1 / Shell_x, 1 / Shell_y, 1); Real3 pos_physical = dist3; Real3 pos_natural = mR3(pos_physical.x * physic_to_natural.x, pos_physical.y * physic_to_natural.y, pos_physical.z * physic_to_natural.z); Real4 N_shell = Shells_ShapeFunctions(FlexSPH_MeshPos_LRF_D[index].x, FlexSPH_MeshPos_LRF_D[index].y); Real NA = N_shell.x; Real NB = N_shell.y; Real NC = N_shell.z; Real ND = N_shell.w; Real3 vel_fsi_fea_D_nA = vel_fsi_fea_D[shellNodes.x]; Real3 vel_fsi_fea_D_nB = vel_fsi_fea_D[shellNodes.y]; Real3 vel_fsi_fea_D_nC = vel_fsi_fea_D[shellNodes.z]; Real3 vel_fsi_fea_D_nD = vel_fsi_fea_D[shellNodes.w]; Real h = posRadD[FlexMarkerIndex].w; Real3 tempPos = NA * pos_fsi_fea_D_nA + NB * pos_fsi_fea_D_nB + NC * pos_fsi_fea_D_nC + ND * pos_fsi_fea_D_nD + Normal * FlexSPH_MeshPos_LRF_D[index].z * Spacing; posRadD[FlexMarkerIndex] = mR4(tempPos, h); velMasD[FlexMarkerIndex] = NA * vel_fsi_fea_D_nA + NB * vel_fsi_fea_D_nB + NC * vel_fsi_fea_D_nC + ND * vel_fsi_fea_D_nD; } } //-------------------------------------------------------------------------------------------------------------------------------- ChBce::ChBce(std::shared_ptr<SphMarkerDataD> otherSortedSphMarkersD, std::shared_ptr<ProximityDataD> otherMarkersProximityD, std::shared_ptr<FsiGeneralData> otherFsiGeneralData, std::shared_ptr<SimParams> otherParamsH, std::shared_ptr<ChCounters> otherNumObjects, bool verb) : sortedSphMarkersD(otherSortedSphMarkersD), markersProximityD(otherMarkersProximityD), fsiGeneralData(otherFsiGeneralData), paramsH(otherParamsH), numObjectsH(otherNumObjects), verbose(verb) { totalSurfaceInteractionRigid4.resize(0); torqueMarkersD.resize(0); dummyIdentify.resize(0); } ChBce::~ChBce() {} //-------------------------------------------------------------------------------------------------------------------------------- void ChBce::Initialize(std::shared_ptr<SphMarkerDataD> sphMarkersD, std::shared_ptr<FsiBodiesDataD> fsiBodiesD, std::shared_ptr<FsiMeshDataD> fsiMeshD, std::vector<int> fsiBodyBceNum, std::vector<int> fsiShellBceNum, std::vector<int> fsiCableBceNum) { cudaMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams)); cudaMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(ChCounters)); CopyParams_NumberOfObjects(paramsH, numObjectsH); totalSurfaceInteractionRigid4.resize(numObjectsH->numRigidBodies); dummyIdentify.resize(numObjectsH->numRigidBodies); torqueMarkersD.resize(numObjectsH->numRigidMarkers); // Resizing the arrays used to modify the BCE velocity and pressure according to ADAMI int haveGhost = (numObjectsH->numGhostMarkers > 0) ? 1 : 0; int haveHelper = (numObjectsH->numHelperMarkers > 0) ? 1 : 0; int haveRigid = (numObjectsH->numRigidBodies > 0) ? 1 : 0; int haveFlex1D = (numObjectsH->numFlexBodies1D > 0) ? 1 : 0; int haveFlex2D = (numObjectsH->numFlexBodies2D > 0) ? 
1 : 0; int numFlexAndRigidAndBoundaryMarkers = fsiGeneralData->referenceArray[2 + haveHelper + haveGhost + haveRigid + haveFlex1D + haveFlex2D - 1].y - fsiGeneralData->referenceArray[haveHelper + haveGhost].y; if (verbose) { printf("Total number of BCE particles = %d\n", numFlexAndRigidAndBoundaryMarkers); if (paramsH->bceType == BceVersion::ADAMI) { printf("Boundary condition for rigid body is: ADAMI\n"); } if (paramsH->bceType == BceVersion::ORIGINAL) { printf("Boundary condition for rigid body is: ORIGINAL\n"); } if (paramsH->bceTypeWall == BceVersion::ADAMI) { printf("Boundary condition for fixed wall is: ADAMI\n"); } if (paramsH->bceTypeWall == BceVersion::ORIGINAL) { printf("Boundary condition for fixed wall is: ORIGINAL\n"); } } if ((numObjectsH->numBoundaryMarkers + numObjectsH->numRigidMarkers + numObjectsH->numFlexMarkers) != numFlexAndRigidAndBoundaryMarkers) { throw std::runtime_error("Error! number of flex and rigid and boundary markers are saved incorrectly!\n"); } velMas_ModifiedBCE.resize(numFlexAndRigidAndBoundaryMarkers); rhoPreMu_ModifiedBCE.resize(numFlexAndRigidAndBoundaryMarkers); // Populate local position of BCE markers if (haveRigid) Populate_RigidSPH_MeshPos_LRF(sphMarkersD, fsiBodiesD, fsiBodyBceNum); if (haveFlex1D || haveFlex2D) Populate_FlexSPH_MeshPos_LRF(sphMarkersD, fsiMeshD, fsiShellBceNum, fsiCableBceNum); } void ChBce::Populate_RigidSPH_MeshPos_LRF(std::shared_ptr<SphMarkerDataD> sphMarkersD, std::shared_ptr<FsiBodiesDataD> fsiBodiesD, std::vector<int> fsiBodyBceNum) { // Create map between a BCE on a rigid body and the associated body ID { uint start_bce = 0; for (int irigid = 0; irigid < fsiBodyBceNum.size(); irigid++) { uint end_bce = start_bce + fsiBodyBceNum[irigid]; thrust::fill(fsiGeneralData->rigidIdentifierD.begin() + start_bce, fsiGeneralData->rigidIdentifierD.begin() + end_bce, irigid); start_bce = end_bce; } } uint nBlocks_numRigid_SphMarkers; uint nThreads_SphMarkers; computeGridSize((uint)numObjectsH->numRigidMarkers, 256, nBlocks_numRigid_SphMarkers, nThreads_SphMarkers); Populate_RigidSPH_MeshPos_LRF_kernel<<<nBlocks_numRigid_SphMarkers, nThreads_SphMarkers>>>( mR3CAST(fsiGeneralData->rigidSPH_MeshPos_LRF_D), mR4CAST(sphMarkersD->posRadD), U1CAST(fsiGeneralData->rigidIdentifierD), mR3CAST(fsiBodiesD->posRigid_fsiBodies_D), mR4CAST(fsiBodiesD->q_fsiBodies_D)); cudaDeviceSynchronize(); cudaCheckError(); UpdateRigidMarkersPositionVelocity(sphMarkersD, fsiBodiesD); } void ChBce::Populate_FlexSPH_MeshPos_LRF(std::shared_ptr<SphMarkerDataD> sphMarkersD, std::shared_ptr<FsiMeshDataD> fsiMeshD, std::vector<int> fsiShellBceNum, std::vector<int> fsiCableBceNum) { // Create map between a BCE on a flex body and the associated flex body ID { uint start_bce = 0; for (uint icable = 0; icable < fsiCableBceNum.size(); icable++) { uint end_bce = start_bce + fsiCableBceNum[icable]; thrust::fill(fsiGeneralData->FlexIdentifierD.begin() + start_bce, fsiGeneralData->FlexIdentifierD.begin() + end_bce, icable); start_bce = end_bce; } for (uint ishell = 0; ishell < fsiShellBceNum.size(); ishell++) { uint end_bce = start_bce + fsiShellBceNum[ishell]; thrust::fill(fsiGeneralData->FlexIdentifierD.begin() + start_bce, fsiGeneralData->FlexIdentifierD.begin() + end_bce, ishell + fsiCableBceNum.size()); start_bce = end_bce; } } #if 0 for (uint i = 0; i < fsiGeneralData->FlexIdentifierD.size(); i++) std::cout << i << " " << fsiGeneralData->FlexIdentifierD[i] << std::endl; #endif uint nBlocks_numFlex_SphMarkers; uint nThreads_SphMarkers; 
computeGridSize((uint)numObjectsH->numFlexMarkers, 256, nBlocks_numFlex_SphMarkers, nThreads_SphMarkers); thrust::device_vector<Real3> FlexSPH_MeshPos_LRF_H = fsiGeneralData->FlexSPH_MeshPos_LRF_H; Populate_FlexSPH_MeshPos_LRF_kernel<<<nBlocks_numFlex_SphMarkers, nThreads_SphMarkers>>>( mR3CAST(fsiGeneralData->FlexSPH_MeshPos_LRF_D), mR3CAST(FlexSPH_MeshPos_LRF_H), mR4CAST(sphMarkersD->posRadD), U1CAST(fsiGeneralData->FlexIdentifierD), (int)numObjectsH->numFlexBodies1D, U2CAST(fsiGeneralData->CableElementsNodes), U4CAST(fsiGeneralData->ShellElementsNodes), mR3CAST(fsiMeshD->pos_fsi_fea_D), paramsH->HSML * paramsH->MULT_INITSPACE_Shells); cudaDeviceSynchronize(); cudaCheckError(); UpdateFlexMarkersPositionVelocity(sphMarkersD, fsiMeshD); } //-------------------------------------------------------------------------------------------------------------------------------- void ChBce::RecalcSortedVelocityPressure_BCE(std::shared_ptr<FsiBodiesDataD> fsiBodiesD, thrust::device_vector<Real3>& velMas_ModifiedBCE, thrust::device_vector<Real4>& rhoPreMu_ModifiedBCE, const thrust::device_vector<Real4>& sortedPosRad, const thrust::device_vector<Real3>& sortedVelMas, const thrust::device_vector<Real4>& sortedRhoPreMu, const thrust::device_vector<uint>& cellStart, const thrust::device_vector<uint>& cellEnd, const thrust::device_vector<uint>& mapOriginalToSorted, const thrust::device_vector<Real3>& bceAcc, int3 updatePortion) { bool *isErrorH, *isErrorD; isErrorH = (bool*)malloc(sizeof(bool)); cudaMalloc((void**)&isErrorD, sizeof(bool)); *isErrorH = false; cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice); // thread per particle uint numThreads, numBlocks; int2 newPortion = mI2(updatePortion.x, updatePortion.z); if (paramsH->bceTypeWall == BceVersion::ORIGINAL) { // Only implement ADAMI BC for rigid body boundary. // Implement a simple BC for fixed wall to avoid unnecessary cost. newPortion = mI2(updatePortion.y, updatePortion.z); } uint numBCE = newPortion.y - newPortion.x; computeGridSize(numBCE, 256, numBlocks, numThreads); new_BCE_VelocityPressure<<<numBlocks, numThreads>>>( mR4CAST(fsiBodiesD->velMassRigid_fsiBodies_D), U1CAST(fsiGeneralData->rigidIdentifierD), mR3CAST(velMas_ModifiedBCE), mR4CAST(rhoPreMu_ModifiedBCE), mR4CAST(sortedPosRad), mR3CAST(sortedVelMas), mR4CAST(sortedRhoPreMu), U1CAST(cellStart), U1CAST(cellEnd), U1CAST(mapOriginalToSorted), mR3CAST(bceAcc), newPortion, isErrorD); cudaDeviceSynchronize(); cudaCheckError() //------------------------------------------------------------------------ cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost); if (*isErrorH == true) { throw std::runtime_error("Error! 
program crashed in new_BCE_VelocityPressure!\n"); } cudaFree(isErrorD); free(isErrorH); } //-------------------------------------------------------------------------------------------------------------------------------- void ChBce::CalcBceAcceleration(thrust::device_vector<Real3>& bceAcc, const thrust::device_vector<Real4>& q_fsiBodies_D, const thrust::device_vector<Real3>& accRigid_fsiBodies_D, const thrust::device_vector<Real3>& omegaVelLRF_fsiBodies_D, const thrust::device_vector<Real3>& omegaAccLRF_fsiBodies_D, const thrust::device_vector<Real3>& rigidSPH_MeshPos_LRF_D, const thrust::device_vector<uint>& rigidIdentifierD, int numRigidMarkers) { // thread per particle uint numThreads, numBlocks; computeGridSize(numRigidMarkers, 256, numBlocks, numThreads); calcBceAcceleration_kernel<<<numBlocks, numThreads>>>( mR3CAST(bceAcc), mR4CAST(q_fsiBodies_D), mR3CAST(accRigid_fsiBodies_D), mR3CAST(omegaVelLRF_fsiBodies_D), mR3CAST(omegaAccLRF_fsiBodies_D), mR3CAST(rigidSPH_MeshPos_LRF_D), U1CAST(rigidIdentifierD)); cudaDeviceSynchronize(); cudaCheckError(); } //-------------------------------------------------------------------------------------------------------------------------------- void ChBce::ModifyBceVelocity(std::shared_ptr<SphMarkerDataD> sphMarkersD, std::shared_ptr<FsiBodiesDataD> fsiBodiesD) { // modify BCE velocity and pressure int numRigidAndBoundaryMarkers = fsiGeneralData->referenceArray[2].y - fsiGeneralData->referenceArray[0].y; if (numObjectsH->numRigidBodies == 0) numRigidAndBoundaryMarkers = fsiGeneralData->referenceArray[1].y - fsiGeneralData->referenceArray[0].y; if ((numObjectsH->numBoundaryMarkers + numObjectsH->numRigidMarkers) != numRigidAndBoundaryMarkers) { throw std::runtime_error( "Error! number of rigid and boundary markers are " "saved incorrectly. Thrown from ModifyBceVelocity!\n"); } if (!(velMas_ModifiedBCE.size() == numRigidAndBoundaryMarkers && rhoPreMu_ModifiedBCE.size() == numRigidAndBoundaryMarkers)) { throw std::runtime_error( "Error! size error velMas_ModifiedBCE and " "rhoPreMu_ModifiedBCE. 
Thrown from ModifyBceVelocity!\n"); } int3 updatePortion = mI3(fsiGeneralData->referenceArray[0].y, fsiGeneralData->referenceArray[1].y, fsiGeneralData->referenceArray[2].y); if (numObjectsH->numRigidBodies == 0) updatePortion.z = fsiGeneralData->referenceArray[1].y; if (paramsH->bceType == BceVersion::ADAMI) { thrust::device_vector<Real3> bceAcc(numObjectsH->numRigidMarkers); if (numObjectsH->numRigidMarkers > 0) { CalcBceAcceleration(bceAcc, fsiBodiesD->q_fsiBodies_D, fsiBodiesD->accRigid_fsiBodies_D, fsiBodiesD->omegaVelLRF_fsiBodies_D, fsiBodiesD->omegaAccLRF_fsiBodies_D, fsiGeneralData->rigidSPH_MeshPos_LRF_D, fsiGeneralData->rigidIdentifierD, (int)numObjectsH->numRigidMarkers); } // ADAMI BC for rigid body, ORIGINAL BC for fixed wall if (paramsH->bceTypeWall == BceVersion::ORIGINAL) { thrust::copy(sphMarkersD->velMasD.begin() + updatePortion.x, sphMarkersD->velMasD.begin() + updatePortion.y, velMas_ModifiedBCE.begin()); thrust::copy(sphMarkersD->rhoPresMuD.begin() + updatePortion.x, sphMarkersD->rhoPresMuD.begin() + updatePortion.y, rhoPreMu_ModifiedBCE.begin()); if (numObjectsH->numRigidMarkers > 0) { RecalcSortedVelocityPressure_BCE( fsiBodiesD, velMas_ModifiedBCE, rhoPreMu_ModifiedBCE, sortedSphMarkersD->posRadD, sortedSphMarkersD->velMasD, sortedSphMarkersD->rhoPresMuD, markersProximityD->cellStartD, markersProximityD->cellEndD, markersProximityD->mapOriginalToSorted, bceAcc, updatePortion); } } // ADAMI BC for both rigid body and fixed wall else if (paramsH->bceTypeWall == BceVersion::ADAMI) { RecalcSortedVelocityPressure_BCE( fsiBodiesD, velMas_ModifiedBCE, rhoPreMu_ModifiedBCE, sortedSphMarkersD->posRadD, sortedSphMarkersD->velMasD, sortedSphMarkersD->rhoPresMuD, markersProximityD->cellStartD, markersProximityD->cellEndD, markersProximityD->mapOriginalToSorted, bceAcc, updatePortion); } bceAcc.clear(); } else { thrust::copy(sphMarkersD->velMasD.begin() + updatePortion.x, sphMarkersD->velMasD.begin() + updatePortion.z, velMas_ModifiedBCE.begin()); thrust::copy(sphMarkersD->rhoPresMuD.begin() + updatePortion.x, sphMarkersD->rhoPresMuD.begin() + updatePortion.z, rhoPreMu_ModifiedBCE.begin()); } } //-------------------------------------------------------------------------------------------------------------------------------- void ChBce::Rigid_Forces_Torques(std::shared_ptr<SphMarkerDataD> sphMarkersD, std::shared_ptr<FsiBodiesDataD> fsiBodiesD) { if (numObjectsH->numRigidBodies == 0) { return; } thrust::fill(fsiGeneralData->rigid_FSI_ForcesD.begin(), fsiGeneralData->rigid_FSI_ForcesD.end(), mR3(0)); thrust::fill(fsiGeneralData->rigid_FSI_TorquesD.begin(), fsiGeneralData->rigid_FSI_TorquesD.end(), mR3(0)); uint nBlocks_numRigid_SphMarkers; uint nThreads_SphMarkers; computeGridSize((uint)numObjectsH->numRigidMarkers, 256, nBlocks_numRigid_SphMarkers, nThreads_SphMarkers); Calc_Rigid_FSI_ForcesD_TorquesD<<<nBlocks_numRigid_SphMarkers, nThreads_SphMarkers>>>( mR3CAST(fsiGeneralData->rigid_FSI_ForcesD), mR3CAST(fsiGeneralData->rigid_FSI_TorquesD), mR4CAST(fsiGeneralData->derivVelRhoD), mR4CAST(fsiGeneralData->derivVelRhoD_old), mR4CAST(sphMarkersD->posRadD), U1CAST(fsiGeneralData->rigidIdentifierD), mR3CAST(fsiBodiesD->posRigid_fsiBodies_D), mR3CAST(fsiGeneralData->rigidSPH_MeshPos_LRF_D), mR4CAST(fsiBodiesD->q_fsiBodies_D)); cudaDeviceSynchronize(); cudaCheckError(); } //-------------------------------------------------------------------------------------------------------------------------------- void ChBce::Flex_Forces(std::shared_ptr<SphMarkerDataD> sphMarkersD, 
std::shared_ptr<FsiMeshDataD> fsiMeshD) { if ((numObjectsH->numFlexBodies1D + numObjectsH->numFlexBodies2D) == 0) { return; } thrust::fill(fsiGeneralData->Flex_FSI_ForcesD.begin(), fsiGeneralData->Flex_FSI_ForcesD.end(), mR3(0)); uint nBlocks_numFlex_SphMarkers; uint nThreads_SphMarkers; computeGridSize((int)numObjectsH->numFlexMarkers, 256, nBlocks_numFlex_SphMarkers, nThreads_SphMarkers); Calc_Flex_FSI_ForcesD<<<nBlocks_numFlex_SphMarkers, nThreads_SphMarkers>>>( mR3CAST(fsiGeneralData->FlexSPH_MeshPos_LRF_D), U1CAST(fsiGeneralData->FlexIdentifierD), (int)numObjectsH->numFlexBodies1D, U2CAST(fsiGeneralData->CableElementsNodes), U4CAST(fsiGeneralData->ShellElementsNodes), mR4CAST(fsiGeneralData->derivVelRhoD), mR4CAST(fsiGeneralData->derivVelRhoD_old), mR3CAST(fsiMeshD->pos_fsi_fea_D), mR3CAST(fsiGeneralData->Flex_FSI_ForcesD)); cudaDeviceSynchronize(); cudaCheckError(); } //-------------------------------------------------------------------------------------------------------------------------------- void ChBce::UpdateRigidMarkersPositionVelocity(std::shared_ptr<SphMarkerDataD> sphMarkersD, std::shared_ptr<FsiBodiesDataD> fsiBodiesD) { if (numObjectsH->numRigidBodies == 0) { return; } uint nBlocks_numRigid_SphMarkers; uint nThreads_SphMarkers; computeGridSize((int)numObjectsH->numRigidMarkers, 256, nBlocks_numRigid_SphMarkers, nThreads_SphMarkers); UpdateRigidMarkersPositionVelocityD<<<nBlocks_numRigid_SphMarkers, nThreads_SphMarkers>>>( mR4CAST(sphMarkersD->posRadD), mR3CAST(sphMarkersD->velMasD), mR3CAST(fsiGeneralData->rigidSPH_MeshPos_LRF_D), U1CAST(fsiGeneralData->rigidIdentifierD), mR3CAST(fsiBodiesD->posRigid_fsiBodies_D), mR4CAST(fsiBodiesD->velMassRigid_fsiBodies_D), mR3CAST(fsiBodiesD->omegaVelLRF_fsiBodies_D), mR4CAST(fsiBodiesD->q_fsiBodies_D)); cudaDeviceSynchronize(); cudaCheckError(); } //-------------------------------------------------------------------------------------------------------------------------------- void ChBce::UpdateFlexMarkersPositionVelocity(std::shared_ptr<SphMarkerDataD> sphMarkersD, std::shared_ptr<FsiMeshDataD> fsiMeshD) { if ((numObjectsH->numFlexBodies1D + numObjectsH->numFlexBodies2D) == 0) { return; } uint nBlocks_numFlex_SphMarkers; uint nThreads_SphMarkers; printf("UpdateFlexMarkersPositionVelocity..\n"); computeGridSize((int)numObjectsH->numFlexMarkers, 256, nBlocks_numFlex_SphMarkers, nThreads_SphMarkers); UpdateFlexMarkersPositionVelocityAccD<<<nBlocks_numFlex_SphMarkers, nThreads_SphMarkers>>>( mR4CAST(sphMarkersD->posRadD), mR3CAST(fsiGeneralData->FlexSPH_MeshPos_LRF_D), mR3CAST(sphMarkersD->velMasD), U1CAST(fsiGeneralData->FlexIdentifierD), (int)numObjectsH->numFlexBodies1D, U2CAST(fsiGeneralData->CableElementsNodes), U4CAST(fsiGeneralData->ShellElementsNodes), mR3CAST(fsiMeshD->pos_fsi_fea_D), mR3CAST(fsiMeshD->vel_fsi_fea_D), paramsH->HSML * paramsH->MULT_INITSPACE_Shells); cudaDeviceSynchronize(); cudaCheckError(); } } // end namespace fsi } // end namespace chrono
5a0c6bc35578b705fea9cc3ccbb05cd6075b99d6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "helper_cuda.h" #include "md5.cuh" //#include "md5cpu.h" #include <iostream> #include <stdio.h> #include "string.h" #include <fstream> using std::cout; using std::endl; void printResult(unsigned char* result){ char buf[33]; for (int i=0; i<16; i++) sprintf(buf+i*2, "%02x", result[i]); buf[32]=0; printf("%s\n", buf); } void charMemcpyCPU(unsigned char *buffer, unsigned char *data, int length){ int i; for(i=0; i<length; i++){ buffer[i] = data[i]; } } const std::string* cpuHash; int main() { //wczytanie bloku 128 bajtów danych z pliku std::ifstream plik1("mm", std::ios::binary); char cstr1[128]; unsigned char message[128]; plik1.read(cstr1, 128); printf("Dane wejsciowe M oraz N wczytane z pliku :\n"); for(int i=0; i<128; i=i+4) { printf("< %-+d %-+d %-+d %-+d > \n", (int)cstr1[i],(int)cstr1[i+1], (int)cstr1[i+2],(int)cstr1[i+3]); } printf("Przepisanie wczytanej wiadomosci do wektora wiadomosci W \n"); for(int i=0; i<128; i++){ message[i]=(unsigned char)cstr1[i]; } printf(" \n Roznice między dwoma wektroami \n"); for(int i=0; i<128; i++) { printf("< nr indeksu %d roznica %-+d > \n", i, ((char)cstr1[i]-(char)message[i])); } //wykorzystywany do liczenia md5 na CPU std::string str1=std::string(cstr1, 128); unsigned char* result = (unsigned char*) malloc(16*sizeof(unsigned char)); unsigned char* orygHash = (unsigned char*) malloc(16*sizeof(unsigned char)); //właściwa funkcja runMD5(message,orygHash, result, 128); printf("Dane wyjsciowe \n"); for(int i=0; i<128; i=i+4) { printf("< %-+d %-+d %-+d %-+d > \n", (int)(char)message[i],(int)(char)message[i+1], (int)(char)message[i+2],(int)(char)message[i+3]); } printf(" \n Roznice miedzy dwoma wektroami \n"); for(int i=0; i<128; i++) { printf("< nr indeksu %d roznica %-+d > \n", i, ((char)cstr1[i]-(char)message[i])); } printf(" \n Skrot wiadomości oryginalnej obliczony przez biblioteke CPU: \n"); // cout<<md5(str1)<<" "<<endl; printf(" \n Skrot wiadomości oryginalnej obliczony przez biblioteke GPU: \n"); printResult(orygHash); printf(" \n Skrot wiadomości kolizyjnej obliczony przez biblioteke GPU: \n"); printResult(result); //to na dole, po to żeby konsola się nie wyłączała od razu return 0; }
5a0c6bc35578b705fea9cc3ccbb05cd6075b99d6.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "helper_cuda.h" #include "md5.cuh" //#include "md5cpu.h" #include <iostream> #include <stdio.h> #include "string.h" #include <fstream> using std::cout; using std::endl; void printResult(unsigned char* result){ char buf[33]; for (int i=0; i<16; i++) sprintf(buf+i*2, "%02x", result[i]); buf[32]=0; printf("%s\n", buf); } void charMemcpyCPU(unsigned char *buffer, unsigned char *data, int length){ int i; for(i=0; i<length; i++){ buffer[i] = data[i]; } } const std::string* cpuHash; int main() { //wczytanie bloku 128 bajtów danych z pliku std::ifstream plik1("mm", std::ios::binary); char cstr1[128]; unsigned char message[128]; plik1.read(cstr1, 128); printf("Dane wejsciowe M oraz N wczytane z pliku :\n"); for(int i=0; i<128; i=i+4) { printf("< %-+d %-+d %-+d %-+d > \n", (int)cstr1[i],(int)cstr1[i+1], (int)cstr1[i+2],(int)cstr1[i+3]); } printf("Przepisanie wczytanej wiadomosci do wektora wiadomosci W \n"); for(int i=0; i<128; i++){ message[i]=(unsigned char)cstr1[i]; } printf(" \n Roznice między dwoma wektroami \n"); for(int i=0; i<128; i++) { printf("< nr indeksu %d roznica %-+d > \n", i, ((char)cstr1[i]-(char)message[i])); } //wykorzystywany do liczenia md5 na CPU std::string str1=std::string(cstr1, 128); unsigned char* result = (unsigned char*) malloc(16*sizeof(unsigned char)); unsigned char* orygHash = (unsigned char*) malloc(16*sizeof(unsigned char)); //właściwa funkcja runMD5(message,orygHash, result, 128); printf("Dane wyjsciowe \n"); for(int i=0; i<128; i=i+4) { printf("< %-+d %-+d %-+d %-+d > \n", (int)(char)message[i],(int)(char)message[i+1], (int)(char)message[i+2],(int)(char)message[i+3]); } printf(" \n Roznice miedzy dwoma wektroami \n"); for(int i=0; i<128; i++) { printf("< nr indeksu %d roznica %-+d > \n", i, ((char)cstr1[i]-(char)message[i])); } printf(" \n Skrot wiadomości oryginalnej obliczony przez biblioteke CPU: \n"); // cout<<md5(str1)<<" "<<endl; printf(" \n Skrot wiadomości oryginalnej obliczony przez biblioteke GPU: \n"); printResult(orygHash); printf(" \n Skrot wiadomości kolizyjnej obliczony przez biblioteke GPU: \n"); printResult(result); //to na dole, po to żeby konsola się nie wyłączała od razu return 0; }
1ae9f9d30f077906000ba15135588ca0839682e7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" #include "maxpool_layer.h" #include "hip/hip_runtime.h" __global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *input, float *output, int *indexes) { int h = (in_h + pad - size)/stride + 1; int w = (in_w + pad - size)/stride + 1; int c = in_c; int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(id >= n) return; int j = id % w; id /= w; int i = id % h; id /= h; int k = id % c; id /= c; int b = id; int w_offset = -pad/2; int h_offset = -pad/2; int out_index = j + w*(i + h*(k + c*b)); float max = -INFINITY; int max_i = -1; int l, m; for(l = 0; l < size; ++l){ for(m = 0; m < size; ++m){ int cur_h = h_offset + i*stride + l; int cur_w = w_offset + j*stride + m; int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c)); int valid = (cur_h >= 0 && cur_h < in_h && cur_w >= 0 && cur_w < in_w); float val = (valid != 0) ? input[index] : -INFINITY; max_i = (val > max) ? index : max_i; max = (val > max) ? val : max; } } output[out_index] = max; indexes[out_index] = max_i; } __global__ void backward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *delta, float *prev_delta, int *indexes) { int h = (in_h + pad - size)/stride + 1; int w = (in_w + pad - size)/stride + 1; int c = in_c; int area = (size-1)/stride; int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(id >= n) return; int index = id; int j = id % in_w; id /= in_w; int i = id % in_h; id /= in_h; int k = id % in_c; id /= in_c; int b = id; int w_offset = -pad/2; int h_offset = -pad/2; float d = 0; int l, m; for(l = -area; l < area+1; ++l){ for(m = -area; m < area+1; ++m){ int out_w = (j-w_offset)/stride + m; int out_h = (i-h_offset)/stride + l; int out_index = out_w + w*(out_h + h*(k + c*b)); int valid = (out_w >= 0 && out_w < w && out_h >= 0 && out_h < h); d += (valid && indexes[out_index] == index) ? delta[out_index] : 0; } } prev_delta[index] += d; } void forward_maxpool_layer_gpu(maxpool_layer layer, network net) { int h = layer.out_h; int w = layer.out_w; int c = layer.c; size_t n = h*w*c*layer.batch; hipLaunchKernelGGL(( forward_maxpool_layer_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, net.input_gpu, layer.output_gpu, layer.indexes_gpu); check_error(hipPeekAtLastError()); } void backward_maxpool_layer_gpu(maxpool_layer layer, network net) { size_t n = layer.h*layer.w*layer.c*layer.batch; hipLaunchKernelGGL(( backward_maxpool_layer_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, layer.delta_gpu, net.delta_gpu, layer.indexes_gpu); check_error(hipPeekAtLastError()); }
1ae9f9d30f077906000ba15135588ca0839682e7.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" #include "maxpool_layer.h" #include "cuda.h" __global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *input, float *output, int *indexes) { int h = (in_h + pad - size)/stride + 1; int w = (in_w + pad - size)/stride + 1; int c = in_c; int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(id >= n) return; int j = id % w; id /= w; int i = id % h; id /= h; int k = id % c; id /= c; int b = id; int w_offset = -pad/2; int h_offset = -pad/2; int out_index = j + w*(i + h*(k + c*b)); float max = -INFINITY; int max_i = -1; int l, m; for(l = 0; l < size; ++l){ for(m = 0; m < size; ++m){ int cur_h = h_offset + i*stride + l; int cur_w = w_offset + j*stride + m; int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c)); int valid = (cur_h >= 0 && cur_h < in_h && cur_w >= 0 && cur_w < in_w); float val = (valid != 0) ? input[index] : -INFINITY; max_i = (val > max) ? index : max_i; max = (val > max) ? val : max; } } output[out_index] = max; indexes[out_index] = max_i; } __global__ void backward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *delta, float *prev_delta, int *indexes) { int h = (in_h + pad - size)/stride + 1; int w = (in_w + pad - size)/stride + 1; int c = in_c; int area = (size-1)/stride; int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(id >= n) return; int index = id; int j = id % in_w; id /= in_w; int i = id % in_h; id /= in_h; int k = id % in_c; id /= in_c; int b = id; int w_offset = -pad/2; int h_offset = -pad/2; float d = 0; int l, m; for(l = -area; l < area+1; ++l){ for(m = -area; m < area+1; ++m){ int out_w = (j-w_offset)/stride + m; int out_h = (i-h_offset)/stride + l; int out_index = out_w + w*(out_h + h*(k + c*b)); int valid = (out_w >= 0 && out_w < w && out_h >= 0 && out_h < h); d += (valid && indexes[out_index] == index) ? delta[out_index] : 0; } } prev_delta[index] += d; } void forward_maxpool_layer_gpu(maxpool_layer layer, network net) { int h = layer.out_h; int w = layer.out_w; int c = layer.c; size_t n = h*w*c*layer.batch; forward_maxpool_layer_kernel<<<cuda_gridsize(n), BLOCK>>>(n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, net.input_gpu, layer.output_gpu, layer.indexes_gpu); check_error(cudaPeekAtLastError()); } void backward_maxpool_layer_gpu(maxpool_layer layer, network net) { size_t n = layer.h*layer.w*layer.c*layer.batch; backward_maxpool_layer_kernel<<<cuda_gridsize(n), BLOCK>>>(n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, layer.delta_gpu, net.delta_gpu, layer.indexes_gpu); check_error(cudaPeekAtLastError()); }