Each record pairs a hipify-generated HIP source file with the CUDA source it was produced from. Column summary (string-length ranges as reported by the dataset viewer):

    hip_filename     string, 5 to 84 chars
    hip_content      string, 79 to 9.69M chars
    cuda_filename    string, 4 to 83 chars
    cuda_content     string, 19 to 9.69M chars
7606b5125cbe6fb0499e6041b3eb0508192167e1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void accumulatedPartSizesKernel(int size, int *part, int *weights, int *accumulatedSize)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;

    if (idx == size - 1)
        accumulatedSize[part[idx]] = weights[idx];

    if (idx < size - 1) {
        int thisPart = part[idx];
        if (thisPart != part[idx + 1])
            accumulatedSize[thisPart] = weights[idx];
    }
}
7606b5125cbe6fb0499e6041b3eb0508192167e1.cu
#include "includes.h" __global__ void accumulatedPartSizesKernel(int size, int *part, int *weights, int *accumulatedSize) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx == size - 1) accumulatedSize[part[idx]] = weights[idx]; if (idx < size - 1) { int thisPart = part[idx]; if (thisPart != part[idx + 1]) accumulatedSize[thisPart] = weights[idx]; } }
70880df999250c856a1100c6b71818d2398d6369.hip
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
//
// This work is made available under the Nvidia Source Code License-NC.
// To view a copy of this license, visit
// https://nvlabs.github.io/stylegan2/license.html

#include <torch/types.h>

#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>

#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>

static __host__ __device__ __forceinline__ int floor_div(int a, int b)
{
    int c = a / b;
    if (c * b > a) {
        c--;
    }
    return c;
}

struct UpFirDn2DKernelParams {
    int up_x; int up_y; int down_x; int down_y;
    int pad_x0; int pad_x1; int pad_y0; int pad_y1;
    int major_dim; int in_h; int in_w; int minor_dim;
    int kernel_h; int kernel_w; int out_h; int out_w;
    int loop_major; int loop_x;
};

template <typename scalar_t, int up_x, int up_y, int down_x, int down_y,
          int kernel_h, int kernel_w, int tile_out_h, int tile_out_w>
__global__ void upfirdn2d_kernel(scalar_t* out, const scalar_t* input,
                                 const scalar_t* kernel,
                                 const UpFirDn2DKernelParams p)
{
    const int tile_in_h = ((tile_out_h - 1) * down_y + kernel_h - 1) / up_y + 1;
    const int tile_in_w = ((tile_out_w - 1) * down_x + kernel_w - 1) / up_x + 1;

    __shared__ volatile float sk[kernel_h][kernel_w];
    __shared__ volatile float sx[tile_in_h][tile_in_w];

    int minor_idx = blockIdx.x;
    int tile_out_y = minor_idx / p.minor_dim;
    minor_idx -= tile_out_y * p.minor_dim;
    tile_out_y *= tile_out_h;
    int tile_out_x_base = blockIdx.y * p.loop_x * tile_out_w;
    int major_idx_base = blockIdx.z * p.loop_major;

    if (tile_out_x_base >= p.out_w | tile_out_y >= p.out_h |
        major_idx_base >= p.major_dim) {
        return;
    }

    for (int tap_idx = threadIdx.x; tap_idx < kernel_h * kernel_w;
         tap_idx += blockDim.x) {
        int ky = tap_idx / kernel_w;
        int kx = tap_idx - ky * kernel_w;
        scalar_t v = 0.0;
        if (kx < p.kernel_w & ky < p.kernel_h) {
            v = kernel[(p.kernel_h - 1 - ky) * p.kernel_w + (p.kernel_w - 1 - kx)];
        }
        sk[ky][kx] = v;
    }

    for (int loop_major = 0, major_idx = major_idx_base;
         loop_major < p.loop_major & major_idx < p.major_dim;
         loop_major++, major_idx++) {
        for (int loop_x = 0, tile_out_x = tile_out_x_base;
             loop_x < p.loop_x & tile_out_x < p.out_w;
             loop_x++, tile_out_x += tile_out_w) {
            int tile_mid_x = tile_out_x * down_x + up_x - 1 - p.pad_x0;
            int tile_mid_y = tile_out_y * down_y + up_y - 1 - p.pad_y0;
            int tile_in_x = floor_div(tile_mid_x, up_x);
            int tile_in_y = floor_div(tile_mid_y, up_y);

            __syncthreads();

            for (int in_idx = threadIdx.x; in_idx < tile_in_h * tile_in_w;
                 in_idx += blockDim.x) {
                int rel_in_y = in_idx / tile_in_w;
                int rel_in_x = in_idx - rel_in_y * tile_in_w;
                int in_x = rel_in_x + tile_in_x;
                int in_y = rel_in_y + tile_in_y;

                scalar_t v = 0.0;
                if (in_x >= 0 & in_y >= 0 & in_x < p.in_w & in_y < p.in_h) {
                    v = input[((major_idx * p.in_h + in_y) * p.in_w + in_x) * p.minor_dim + minor_idx];
                }
                sx[rel_in_y][rel_in_x] = v;
            }

            __syncthreads();

            for (int out_idx = threadIdx.x; out_idx < tile_out_h * tile_out_w;
                 out_idx += blockDim.x) {
                int rel_out_y = out_idx / tile_out_w;
                int rel_out_x = out_idx - rel_out_y * tile_out_w;
                int out_x = rel_out_x + tile_out_x;
                int out_y = rel_out_y + tile_out_y;

                int mid_x = tile_mid_x + rel_out_x * down_x;
                int mid_y = tile_mid_y + rel_out_y * down_y;
                int in_x = floor_div(mid_x, up_x);
                int in_y = floor_div(mid_y, up_y);
                int rel_in_x = in_x - tile_in_x;
                int rel_in_y = in_y - tile_in_y;
                int kernel_x = (in_x + 1) * up_x - mid_x - 1;
                int kernel_y = (in_y + 1) * up_y - mid_y - 1;

                scalar_t v = 0.0;

                #pragma unroll
                for (int y = 0; y < kernel_h / up_y; y++)
                    #pragma unroll
                    for (int x = 0; x < kernel_w / up_x; x++)
                        v += sx[rel_in_y + y][rel_in_x + x] *
                             sk[kernel_y + y * up_y][kernel_x + x * up_x];

                if (out_x < p.out_w & out_y < p.out_h) {
                    out[((major_idx * p.out_h + out_y) * p.out_w + out_x) * p.minor_dim + minor_idx] = v;
                }
            }
        }
    }
}

torch::Tensor upfirdn2d_op(const torch::Tensor& input, const torch::Tensor& kernel,
                           int up_x, int up_y, int down_x, int down_y,
                           int pad_x0, int pad_x1, int pad_y0, int pad_y1)
{
    int curDevice = -1;
    hipGetDevice(&curDevice);
    hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);

    UpFirDn2DKernelParams p;

    auto x = input.contiguous();
    auto k = kernel.contiguous();

    p.major_dim = x.size(0);
    p.in_h = x.size(1);
    p.in_w = x.size(2);
    p.minor_dim = x.size(3);
    p.kernel_h = k.size(0);
    p.kernel_w = k.size(1);
    p.up_x = up_x; p.up_y = up_y;
    p.down_x = down_x; p.down_y = down_y;
    p.pad_x0 = pad_x0; p.pad_x1 = pad_x1;
    p.pad_y0 = pad_y0; p.pad_y1 = pad_y1;

    p.out_h = (p.in_h * p.up_y + p.pad_y0 + p.pad_y1 - p.kernel_h + p.down_y) / p.down_y;
    p.out_w = (p.in_w * p.up_x + p.pad_x0 + p.pad_x1 - p.kernel_w + p.down_x) / p.down_x;

    auto out = at::empty({p.major_dim, p.out_h, p.out_w, p.minor_dim}, x.options());

    int mode = -1;
    int tile_out_h;
    int tile_out_w;

    if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 &&
        p.kernel_h <= 4 && p.kernel_w <= 4) {
        mode = 1; tile_out_h = 16; tile_out_w = 64;
    }
    if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 &&
        p.kernel_h <= 3 && p.kernel_w <= 3) {
        mode = 2; tile_out_h = 16; tile_out_w = 64;
    }
    if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 &&
        p.kernel_h <= 4 && p.kernel_w <= 4) {
        mode = 3; tile_out_h = 16; tile_out_w = 64;
    }
    if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 &&
        p.kernel_h <= 2 && p.kernel_w <= 2) {
        mode = 4; tile_out_h = 16; tile_out_w = 64;
    }
    if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 &&
        p.kernel_h <= 4 && p.kernel_w <= 4) {
        mode = 5; tile_out_h = 8; tile_out_w = 32;
    }
    if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 &&
        p.kernel_h <= 2 && p.kernel_w <= 2) {
        mode = 6; tile_out_h = 8; tile_out_w = 32;
    }

    dim3 block_size;
    dim3 grid_size;

    if (tile_out_h > 0 && tile_out_w) {
        p.loop_major = (p.major_dim - 1) / 16384 + 1;
        p.loop_x = 1;
        block_size = dim3(32 * 8, 1, 1);
        grid_size = dim3(((p.out_h - 1) / tile_out_h + 1) * p.minor_dim,
                         (p.out_w - 1) / (p.loop_x * tile_out_w) + 1,
                         (p.major_dim - 1) / p.loop_major + 1);
    }

    AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] {
        switch (mode) {
        case 1:
            hipLaunchKernelGGL((upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 4, 4, 16, 64>),
                dim3(grid_size), dim3(block_size), 0, stream,
                out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p);
            break;
        case 2:
            hipLaunchKernelGGL((upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 3, 3, 16, 64>),
                dim3(grid_size), dim3(block_size), 0, stream,
                out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p);
            break;
        case 3:
            hipLaunchKernelGGL((upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 4, 4, 16, 64>),
                dim3(grid_size), dim3(block_size), 0, stream,
                out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p);
            break;
        case 4:
            hipLaunchKernelGGL((upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 2, 2, 16, 64>),
                dim3(grid_size), dim3(block_size), 0, stream,
                out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p);
            break;
        case 5:
            hipLaunchKernelGGL((upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 4, 4, 8, 32>),
                dim3(grid_size), dim3(block_size), 0, stream,
                out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p);
            break;
        case 6:
            hipLaunchKernelGGL((upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 4, 4, 8, 32>),
                dim3(grid_size), dim3(block_size), 0, stream,
                out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p);
            break;
        }
    });

    return out;
}
70880df999250c856a1100c6b71818d2398d6369.cu
// Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
//
// This work is made available under the Nvidia Source Code License-NC.
// To view a copy of this license, visit
// https://nvlabs.github.io/stylegan2/license.html

#include <torch/types.h>

#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>

#include <cuda.h>
#include <cuda_runtime.h>

static __host__ __device__ __forceinline__ int floor_div(int a, int b)
{
    int c = a / b;
    if (c * b > a) {
        c--;
    }
    return c;
}

struct UpFirDn2DKernelParams {
    int up_x; int up_y; int down_x; int down_y;
    int pad_x0; int pad_x1; int pad_y0; int pad_y1;
    int major_dim; int in_h; int in_w; int minor_dim;
    int kernel_h; int kernel_w; int out_h; int out_w;
    int loop_major; int loop_x;
};

template <typename scalar_t, int up_x, int up_y, int down_x, int down_y,
          int kernel_h, int kernel_w, int tile_out_h, int tile_out_w>
__global__ void upfirdn2d_kernel(scalar_t* out, const scalar_t* input,
                                 const scalar_t* kernel,
                                 const UpFirDn2DKernelParams p)
{
    const int tile_in_h = ((tile_out_h - 1) * down_y + kernel_h - 1) / up_y + 1;
    const int tile_in_w = ((tile_out_w - 1) * down_x + kernel_w - 1) / up_x + 1;

    __shared__ volatile float sk[kernel_h][kernel_w];
    __shared__ volatile float sx[tile_in_h][tile_in_w];

    int minor_idx = blockIdx.x;
    int tile_out_y = minor_idx / p.minor_dim;
    minor_idx -= tile_out_y * p.minor_dim;
    tile_out_y *= tile_out_h;
    int tile_out_x_base = blockIdx.y * p.loop_x * tile_out_w;
    int major_idx_base = blockIdx.z * p.loop_major;

    if (tile_out_x_base >= p.out_w | tile_out_y >= p.out_h |
        major_idx_base >= p.major_dim) {
        return;
    }

    for (int tap_idx = threadIdx.x; tap_idx < kernel_h * kernel_w;
         tap_idx += blockDim.x) {
        int ky = tap_idx / kernel_w;
        int kx = tap_idx - ky * kernel_w;
        scalar_t v = 0.0;
        if (kx < p.kernel_w & ky < p.kernel_h) {
            v = kernel[(p.kernel_h - 1 - ky) * p.kernel_w + (p.kernel_w - 1 - kx)];
        }
        sk[ky][kx] = v;
    }

    for (int loop_major = 0, major_idx = major_idx_base;
         loop_major < p.loop_major & major_idx < p.major_dim;
         loop_major++, major_idx++) {
        for (int loop_x = 0, tile_out_x = tile_out_x_base;
             loop_x < p.loop_x & tile_out_x < p.out_w;
             loop_x++, tile_out_x += tile_out_w) {
            int tile_mid_x = tile_out_x * down_x + up_x - 1 - p.pad_x0;
            int tile_mid_y = tile_out_y * down_y + up_y - 1 - p.pad_y0;
            int tile_in_x = floor_div(tile_mid_x, up_x);
            int tile_in_y = floor_div(tile_mid_y, up_y);

            __syncthreads();

            for (int in_idx = threadIdx.x; in_idx < tile_in_h * tile_in_w;
                 in_idx += blockDim.x) {
                int rel_in_y = in_idx / tile_in_w;
                int rel_in_x = in_idx - rel_in_y * tile_in_w;
                int in_x = rel_in_x + tile_in_x;
                int in_y = rel_in_y + tile_in_y;

                scalar_t v = 0.0;
                if (in_x >= 0 & in_y >= 0 & in_x < p.in_w & in_y < p.in_h) {
                    v = input[((major_idx * p.in_h + in_y) * p.in_w + in_x) * p.minor_dim + minor_idx];
                }
                sx[rel_in_y][rel_in_x] = v;
            }

            __syncthreads();

            for (int out_idx = threadIdx.x; out_idx < tile_out_h * tile_out_w;
                 out_idx += blockDim.x) {
                int rel_out_y = out_idx / tile_out_w;
                int rel_out_x = out_idx - rel_out_y * tile_out_w;
                int out_x = rel_out_x + tile_out_x;
                int out_y = rel_out_y + tile_out_y;

                int mid_x = tile_mid_x + rel_out_x * down_x;
                int mid_y = tile_mid_y + rel_out_y * down_y;
                int in_x = floor_div(mid_x, up_x);
                int in_y = floor_div(mid_y, up_y);
                int rel_in_x = in_x - tile_in_x;
                int rel_in_y = in_y - tile_in_y;
                int kernel_x = (in_x + 1) * up_x - mid_x - 1;
                int kernel_y = (in_y + 1) * up_y - mid_y - 1;

                scalar_t v = 0.0;

                #pragma unroll
                for (int y = 0; y < kernel_h / up_y; y++)
                    #pragma unroll
                    for (int x = 0; x < kernel_w / up_x; x++)
                        v += sx[rel_in_y + y][rel_in_x + x] *
                             sk[kernel_y + y * up_y][kernel_x + x * up_x];

                if (out_x < p.out_w & out_y < p.out_h) {
                    out[((major_idx * p.out_h + out_y) * p.out_w + out_x) * p.minor_dim + minor_idx] = v;
                }
            }
        }
    }
}

torch::Tensor upfirdn2d_op(const torch::Tensor& input, const torch::Tensor& kernel,
                           int up_x, int up_y, int down_x, int down_y,
                           int pad_x0, int pad_x1, int pad_y0, int pad_y1)
{
    int curDevice = -1;
    cudaGetDevice(&curDevice);
    cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);

    UpFirDn2DKernelParams p;

    auto x = input.contiguous();
    auto k = kernel.contiguous();

    p.major_dim = x.size(0);
    p.in_h = x.size(1);
    p.in_w = x.size(2);
    p.minor_dim = x.size(3);
    p.kernel_h = k.size(0);
    p.kernel_w = k.size(1);
    p.up_x = up_x; p.up_y = up_y;
    p.down_x = down_x; p.down_y = down_y;
    p.pad_x0 = pad_x0; p.pad_x1 = pad_x1;
    p.pad_y0 = pad_y0; p.pad_y1 = pad_y1;

    p.out_h = (p.in_h * p.up_y + p.pad_y0 + p.pad_y1 - p.kernel_h + p.down_y) / p.down_y;
    p.out_w = (p.in_w * p.up_x + p.pad_x0 + p.pad_x1 - p.kernel_w + p.down_x) / p.down_x;

    auto out = at::empty({p.major_dim, p.out_h, p.out_w, p.minor_dim}, x.options());

    int mode = -1;
    int tile_out_h;
    int tile_out_w;

    if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 &&
        p.kernel_h <= 4 && p.kernel_w <= 4) {
        mode = 1; tile_out_h = 16; tile_out_w = 64;
    }
    if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 &&
        p.kernel_h <= 3 && p.kernel_w <= 3) {
        mode = 2; tile_out_h = 16; tile_out_w = 64;
    }
    if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 &&
        p.kernel_h <= 4 && p.kernel_w <= 4) {
        mode = 3; tile_out_h = 16; tile_out_w = 64;
    }
    if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 &&
        p.kernel_h <= 2 && p.kernel_w <= 2) {
        mode = 4; tile_out_h = 16; tile_out_w = 64;
    }
    if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 &&
        p.kernel_h <= 4 && p.kernel_w <= 4) {
        mode = 5; tile_out_h = 8; tile_out_w = 32;
    }
    if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 &&
        p.kernel_h <= 2 && p.kernel_w <= 2) {
        mode = 6; tile_out_h = 8; tile_out_w = 32;
    }

    dim3 block_size;
    dim3 grid_size;

    if (tile_out_h > 0 && tile_out_w) {
        p.loop_major = (p.major_dim - 1) / 16384 + 1;
        p.loop_x = 1;
        block_size = dim3(32 * 8, 1, 1);
        grid_size = dim3(((p.out_h - 1) / tile_out_h + 1) * p.minor_dim,
                         (p.out_w - 1) / (p.loop_x * tile_out_w) + 1,
                         (p.major_dim - 1) / p.loop_major + 1);
    }

    AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] {
        switch (mode) {
        case 1:
            upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 4, 4, 16, 64><<<grid_size, block_size, 0, stream>>>(
                out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p);
            break;
        case 2:
            upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 3, 3, 16, 64><<<grid_size, block_size, 0, stream>>>(
                out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p);
            break;
        case 3:
            upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 4, 4, 16, 64><<<grid_size, block_size, 0, stream>>>(
                out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p);
            break;
        case 4:
            upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 2, 2, 16, 64><<<grid_size, block_size, 0, stream>>>(
                out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p);
            break;
        case 5:
            upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 4, 4, 8, 32><<<grid_size, block_size, 0, stream>>>(
                out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p);
            break;
        case 6:
            upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 4, 4, 8, 32><<<grid_size, block_size, 0, stream>>>(
                out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p);
            break;
        }
    });

    return out;
}
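Both versions of upfirdn2d_op compute the output size with the same closed-form expression before choosing a tiling mode, and the only differences hipify introduces are the HIP headers, the stream lookup, and the hipLaunchKernelGGL launch wrapper. A small stand-alone check of the size arithmetic (the concrete numbers and pad choices below are hypothetical, picked to land in modes 3 and 5; they are not taken from the file):

#include <cstdio>

// Mirrors the p.out_h / p.out_w computation in upfirdn2d_op.
static int outSize(int in, int up, int down, int k, int pad0, int pad1)
{
    return (in * up + pad0 + pad1 - k + down) / down;
}

int main()
{
    // 2x upsample of a 64x64 map with a 4-tap kernel and pad (2, 1):
    // (64*2 + 2 + 1 - 4 + 1) / 1 = 128, selecting mode 3 (16x64 output tiles).
    printf("up   -> %d\n", outSize(64, 2, 1, 4, 2, 1));
    // 2x downsample of the same map with a 4-tap kernel and pad (1, 1):
    // (64*1 + 1 + 1 - 4 + 2) / 2 = 32, selecting mode 5 (8x32 output tiles).
    printf("down -> %d\n", outSize(64, 1, 2, 4, 1, 1));
    return 0;
}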
3b3f6083b39c96cb58cba230f1ca963bac1fc84e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

__global__ void alter(int *a, int *t)
{
    int n = threadIdx.x, m = blockIdx.x, size = blockDim.x;
    t[m*size + n] = 1;
    int d = m;
    while (d >= 0) {
        t[m*size + n] *= a[m*size + n];
        d--;
    }
}

int main(void)
{
    int *a, *t, m, n, i, j;
    int *d_a, *d_t;

    printf("Enter the value of m : ");
    scanf("%d", &m);
    printf("Enter the value of n : ");
    scanf("%d", &n);

    int size = sizeof(int) * m * n;
    a = (int*)malloc(m * n * sizeof(int));
    t = (int*)malloc(m * n * sizeof(int));

    printf("Enter input matrix : \n");
    for (i = 0; i < m*n; i++) {
        scanf("%d", &a[i]);
    }

    hipMalloc((void**)&d_a, size);
    hipMalloc((void**)&d_t, size);
    hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);

    hipLaunchKernelGGL((alter), dim3(m), dim3(n), 0, 0, d_a, d_t);

    hipMemcpy(t, d_t, size, hipMemcpyDeviceToHost);

    printf("Result vector is : \n");
    for (i = 0; i < m; i++) {
        for (j = 0; j < n; j++) {
            printf("%d\t", t[i*n + j]);
        }
        printf("\n");
    }

    getchar();
    hipFree(d_a);
    hipFree(d_t);
    return 0;
}
3b3f6083b39c96cb58cba230f1ca963bac1fc84e.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include<stdio.h> #include<stdlib.h> #include<math.h> __global__ void alter(int *a, int * t){ int n = threadIdx.x, m=blockIdx.x, size = blockDim.x; t[m*size + n] = 1; int d = m; while(d>=0){ t[m*size+n]*=a[m*size+n]; d--; } } int main(void){ int *a,*t,m,n,i,j; int *d_a,*d_t; printf("Enter the value of m : "); scanf("%d",&m); printf("Enter the value of n : "); scanf("%d",&n); int size = sizeof(int) * m * n; a = (int*)malloc(m*n*sizeof(int)); t = (int*)malloc(m*n*sizeof(int)); printf("Enter input matrix : \n"); for(i=0;i<m*n;i++){ scanf("%d",&a[i]); } cudaMalloc((void**)&d_a,size); cudaMalloc((void**)&d_t,size); cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice); alter<<<m,n>>>(d_a,d_t); cudaMemcpy(t,d_t,size,cudaMemcpyDeviceToHost); printf("Result vector is : \n"); for(i=0;i<m;i++){ for(j=0;j<n;j++){ printf("%d\t",t[i*n+j]); } printf("\n"); } getchar(); cudaFree(d_a); cudaFree(d_t); return 0; }
db3f362c56858cc39ecfca02880022f32bdaf2a1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* NiuTrans.Tensor - an open-source tensor library
 * Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
 * All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * $Created by: XIAO Tong (email: [email protected]) 2018-04-26
 */

#include "LogSoftmax.h"
#include "LogSoftmax.cuh"
#include "Loss.cuh"
#include "../core/arithmetic/MultiplyDim.h"
#include "../core/reduce/ReduceSum.cuh"
#include "../core/reduce/ReduceMax.cuh"
#include "../XDevice.h"
#include "device_launch_parameters.h"
#include "hip/hip_fp16.h"

namespace nts { // namespace nts(NiuTrans.Tensor)

#ifdef USE_ROCM

/*
log scale softmax y = log(e^x / \sum_{i} e^{x_i}) (Cuda version)
to keep the values in an int register, the function has been transformed into
a Taylor expansion, multiplied by a constant to enlarge the temporaries
>> val - x
>> inputSum - sum_{i}
note that to modify this function one should also modify the kernel functions;
they cannot call other functions, and using a define is a little difficult
*/
/*
 * $Created by: YIN FEI (email: -) 2019-01-25
 */
#define COEFFICIENT_FUNCTION_INT 1000
#define LN_COEFFICIENT_FUNCTION_INT 7 // ln 1000 = 6.9

inline void FUNCTION_INT(int * val, int * inputSum)
{
    int value = *val;

    /* exp() Taylor expansion */
    /* for speed, directly use register values */
    /* very important: the leading 1 is omitted here because it is subtracted later */
    int toMul = 1, sum = 1;
    toMul *= value; sum += toMul / 1;
    toMul *= value; sum += toMul / 2;
    toMul *= value; sum += toMul / 6;
    /* toMul *= value; sum += toMul / 24; */
    /* toMul *= value; sum += toMul / 120; */
    /* ... */

    /* to ensure the int can hold the value, scale it up */
    sum *= COEFFICIENT_FUNCTION_INT;
    sum /= (*inputSum);

    /* log() Taylor expansion (in C++, log means ln()) */
    /*value = sum - 1;
    sum = 0; toMul = 1;
    toMul *= value; sum += value;
    toMul *= -value; sum += toMul / 2;
    toMul *= -value; sum += toMul / 3;*/
    /* ... */

    /* subtract the shift that multiplying by the coefficient introduces */
    // sum -= LN_COEFFICIENT_FUNCTION_INT;
    /* the argument here is larger than 1, so the log is useless */
    /* here we change the function into a - x */
    *val = -sum;
}

/*
log scale softmax y = log(e^x / \sum_{i} e^{x_i}) (Cuda version)
>> x - input vector
>> y - result
>> leadDim - leading dimension (along which we perform reduction)
*/
void _CudaLogSoftmax(const XTensor * x, XTensor * y, int leadDim)
{
    ShowNTErrors("You should call LogSoftmax instead!");
}

/*
log softmax forward computation (Cuda kernel)
for each column j, let y_{i,j} and x_{i,j} be the output and state value for
the i-th element of column j. We have
y_{i,j} = log(e^{x_{i,j}} / \sum_{i} e^{x_{i,j}})
>> x - input tensor (in matrix)
>> max - the max value for each column j
>> sum - \sum_{i} e^{x_{i,j}} for each column j
>> y - output tensor (in matrix)
>> rowNum - row number of the matrix
>> colNum - column number of the matrix
*/
__global__
void KernelLogSoftmaxComputeByRow(DTYPE * x, DTYPE * max, DTYPE * sum, DTYPE * y, int rowNum, int colNum)
{
    __shared__ DTYPE inputSum[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ DTYPE inputMax[MAX_CUDA_THREAD_NUM_PER_BLOCK];

    int i = blockDim.y * blockIdx.y + threadIdx.y;
    int j = blockDim.x * blockIdx.x + threadIdx.x;

    /* we keep the sum and max number in the shared memory for each column */
    if (threadIdx.y == 0) {
        inputSum[threadIdx.x] = sum[j];
        inputMax[threadIdx.x] = max[j];
    }

    /* synchronize to make sure the values of max and sum are loaded */
    __syncthreads();

    /* y_{i,j} = log(e^(s_{i,j} - max_{j}) / \sum_{k} e^{s_{k,j} - max_{j}}) */
    if (i < rowNum && j < colNum) {
        int key = i * colNum + j;
        /*DTYPE value = x[key] - inputMax[threadIdx.x];
        DTYPE toMul = 1, sum = 1;
        toMul *= value; sum += toMul / 1.0;
        toMul *= value; sum += toMul / 2.0;
        toMul *= value; sum += toMul / 6.0;
        DTYPE r = log(sum / inputSum[threadIdx.x]);*/
        //DTYPE r = log(exp(x[key] - inputMax[threadIdx.x]) / inputSum[threadIdx.x]);
        DTYPE r = log(pow((float)2.0, x[key] - inputMax[threadIdx.x]) / inputSum[threadIdx.x]);

        if (isnan(r)) r = LOGPROB_MIN;
        if (isinf(r)) r = LOGPROB_MIN;

        y[key] = MAX(r, LOGPROB_MIN);
    }
}

/*
half precision log softmax forward computation (Cuda kernel)
for each column j, let y_{i,j} and x_{i,j} be the output and state value for
the i-th element of column j. We have
y_{i,j} = log(e^{x_{i,j}} / \sum_{i} e^{x_{i,j}})
>> x - input tensor (in matrix)
>> max - the max value for each column j
>> sum - \sum_{i} e^{x_{i,j}} for each column j
>> y - output tensor (in matrix)
>> rowNum - row number of the matrix
>> colNum - column number of the matrix
*/
__global__
void KernelLogSoftmaxComputeByRowHalf(half * x, half * max, half * sum, half * y, int rowNum, int colNum)
{
    __shared__ half inputSum[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ half inputMax[MAX_CUDA_THREAD_NUM_PER_BLOCK];

    int i = blockDim.y * blockIdx.y + threadIdx.y;
    int j = blockDim.x * blockIdx.x + threadIdx.x;

    /* we keep the sum and max number in the shared memory for each column */
    if (threadIdx.y == 0) {
        inputSum[threadIdx.x] = sum[j];
        inputMax[threadIdx.x] = max[j];
    }

    /* synchronize to make sure the values of max and sum are loaded */
    __syncthreads();

    /* y_{i,j} = log(e^(s_{i,j} - max_{j}) / \sum_{k} e^{s_{k,j} - max_{j}}) */
    if (i < rowNum && j < colNum) {
        int key = i * colNum + j;
        //half r = hlog(hexp(x[key] - inputMax[threadIdx.x]) / inputSum[threadIdx.x]); //cuda_fp16 line:1790 ...
        half r = hlog(hexp(x[key] - inputMax[threadIdx.x]));
        y[key] = r;
    }
}

/*
int precision log softmax forward computation (Cuda kernel)
for each column j, let y_{i,j} and x_{i,j} be the output and state value for
the i-th element of column j. We have
y_{i,j} = log(e^{x_{i,j}} / \sum_{i} e^{x_{i,j}})
>> x - input tensor (in matrix)
>> max - the max value for each column j
>> sum - \sum_{i} e^{x_{i,j}} for each column j
>> y - output tensor (in matrix)
>> rowNum - row number of the matrix
>> colNum - column number of the matrix
*/
__global__
void KernelLogSoftmaxComputeByRowINT(int * x, int * max, int * sum, int * y, int rowNum, int colNum)
{
    __shared__ int inputSum[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ int inputMax[MAX_CUDA_THREAD_NUM_PER_BLOCK];

    int i = blockDim.y * blockIdx.y + threadIdx.y;
    int j = blockDim.x * blockIdx.x + threadIdx.x;

    /* we keep the sum and max number in the shared memory for each column */
    if (threadIdx.y == 0) {
        inputSum[threadIdx.x] = sum[j];
        inputMax[threadIdx.x] = max[j];
    }

    /* synchronize to make sure the values of max and sum are loaded */
    __syncthreads();

    /* y_{i,j} = log(e^(s_{i,j} - max_{j}) / \sum_{k} e^{s_{k,j} - max_{j}}) */
    if (i < rowNum && j < colNum) {
        int key = i * colNum + j;
        //half r = hlog(hexp(x[key] - inputMax[threadIdx.x]) / inputSum[threadIdx.x]); //cuda_fp16 line:1790 ...
        // original, log (exp)
        // int r = hlog(hexp(x[key] - inputMax[threadIdx.x]));
        //int value = x[key] - inputMax[threadIdx.x];
        y[key] = x[key] - inputMax[threadIdx.x] - 10;
        //FUNCTION_INT(&value, &inputSum[threadIdx.x]);

        /* exp() Taylor expansion */
        /* for speed, directly use register values */
        /* very important: the leading 1 is omitted here because it is subtracted later */
        /*int toMul = 1, sum = 1;
        toMul *= value; sum += toMul / 1;
        toMul *= value; sum += toMul / 2;
        toMul *= value; sum += toMul / 6;*/
        /* toMul *= value; sum += toMul / 24; */
        /* toMul *= value; sum += toMul / 120; */
        /* ... */

        /* to ensure the int can hold the value, scale it up */
        /*sum *= COEFFICIENT_FUNCTION_INT;
        sum /= inputSum[threadIdx.x];*/

        /* log() Taylor expansion (in C++, log means ln()) */
        /*value = sum - 1;
        sum = 0; toMul = 1;
        toMul *= value; sum += value;
        toMul *= -value; sum += toMul / 2;
        toMul *= -value; sum += toMul / 3;*/
        /* ... */

        /* subtract the shift that multiplying by the coefficient introduces */
        // sum -= LN_COEFFICIENT_FUNCTION_INT;
        /* the argument here is larger than 1, so the log is useless */
        /* here we change the function into a - x */
    }
}

__global__
void KernelLogSoftmaxComputeByRowFloatTest(DTYPE * x, DTYPE * max, DTYPE * sum, DTYPE * y, int rowNum, int colNum)
{
    __shared__ DTYPE inputSum[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ DTYPE inputMax[MAX_CUDA_THREAD_NUM_PER_BLOCK];

    int i = blockDim.y * blockIdx.y + threadIdx.y;
    int j = blockDim.x * blockIdx.x + threadIdx.x;

    /* we keep the sum and max number in the shared memory for each column */
    if (threadIdx.y == 0) {
        inputSum[threadIdx.x] = sum[j];
        inputMax[threadIdx.x] = max[j];
    }

    /* synchronize to make sure the values of max and sum are loaded */
    __syncthreads();

    /* y_{i,j} = log(e^(s_{i,j} - max_{j}) / \sum_{k} e^{s_{k,j} - max_{j}}) */
    if (i < rowNum && j < colNum) {
        int key = i * colNum + j;
        DTYPE value = x[key] - inputMax[threadIdx.x];

        /* exp() Taylor expansion */
        /* for speed, directly use register values */
        DTYPE sum = exp(value);
        /*DTYPE toMul = 1.0, sum = 1.0;
        toMul *= value; sum += toMul / 1.0;
        toMul *= value; sum += toMul / 2.0;
        toMul *= value; sum += toMul / 6.0;*/
        /* toMul *= value; sum += toMul / 24; */
        /* toMul *= value; sum += toMul / 120; */
        /* ... */

        value = sum / inputSum[threadIdx.x];

        /* log() Taylor expansion (in C++, log means ln()) */
        /*value = sum - 1;
        sum = 0; toMul = 1;
        toMul *= value; sum += value;
        toMul *= -value; sum += toMul / 2;
        toMul *= -value; sum += toMul / 3;
        value = sum;*/
        value = log(value);
        /* problem: the log could be expanded for testing as well */
        //DTYPE r = log(exp(x[key] - inputMax[threadIdx.x]) / inputSum[threadIdx.x]);

        if (isnan(value)) value = LOGPROB_MIN;
        if (isinf(value)) value = LOGPROB_MIN;

        y[key] = MAX(value, LOGPROB_MIN);
    }
}

/*
log softmax forward computation (Cuda kernel)
for each row i, let y_{i,j} and x_{i,j} be the output and state value for
the j-th element of row i. We have
y_{i,j} = log(e^{x_{i,j}} / \sum_{j} e^{x_{i,j}})
>> x - input tensor (in matrix)
>> max - the max value for each row i
>> sum - \sum_{j} e^{x_{i,j}} for each row i
>> y - output tensor (in matrix)
>> rowNum - row number of the matrix
>> colNum - column number of the matrix
*/
__global__
void KernelLogSoftmaxComputeByCol(DTYPE * x, DTYPE * max, DTYPE * sum, DTYPE * y, int rowNum, int colNum)
{
    __shared__ DTYPE inputSum[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ DTYPE inputMax[MAX_CUDA_THREAD_NUM_PER_BLOCK];

    int i = blockDim.y * blockIdx.y + threadIdx.y;
    int j = blockDim.x * blockIdx.x + threadIdx.x;

    /* we keep the sum and max number in the shared memory for each row */
    if (threadIdx.x == 0) {
        inputSum[threadIdx.y] = sum[i];
        inputMax[threadIdx.y] = max[i];
    }

    /* synchronize to make sure the values of max and sum are loaded */
    __syncthreads();

    /* y_{i,j} = log(e^(s_{i,j} - max_{i}) / \sum_{k} e^{s_{i,k} - max_{i}}) */
    if (i < rowNum && j < colNum) {
        int key = i * colNum + j;
        /*DTYPE value = x[key] - inputMax[threadIdx.y];
        DTYPE toMul = 1, sum = 1;
        toMul *= value; sum += toMul / 1.0;
        toMul *= value; sum += toMul / 2.0;
        toMul *= value; sum += toMul / 6.0;
        DTYPE r = log(sum / inputSum[threadIdx.y]);*/
        // DTYPE r = log(exp(x[key] - inputMax[threadIdx.y]) / inputSum[threadIdx.y]);
        /*if ((x[key] - inputMax[threadIdx.x]) < -100.0) {
            printf("kernel col value: %.3e %.3e %.3e\n",
                   x[key], inputMax[threadIdx.y], x[key] - inputMax[threadIdx.y]);
        }*/
        //fprintf(in, "%.3f\n", x[key] - inputMax[threadIdx.x]);
        DTYPE r = log(pow((float)2.0, x[key] - inputMax[threadIdx.y]) / inputSum[threadIdx.y]);
        /*if (r < LOGPROB_MIN) {
            printf("min %e %e, %e %e, %e %e\n", r, x[key] - inputMax[threadIdx.y],
                   x[key], inputMax[threadIdx.y],
                   exp(x[key] - inputMax[threadIdx.y]), inputSum[threadIdx.y]);
        }*/

        if (isnan(r)) r = LOGPROB_MIN;
        if (isinf(r)) r = LOGPROB_MIN;

        y[key] = MAX(r, LOGPROB_MIN);
    }
}

/*
half precision log softmax forward computation (Cuda kernel)
for each row i, let y_{i,j} and x_{i,j} be the output and state value for
the j-th element of row i. We have
y_{i,j} = log(e^{x_{i,j}} / \sum_{j} e^{x_{i,j}})
>> x - input tensor (in matrix)
>> max - the max value for each row i
>> sum - \sum_{j} e^{x_{i,j}} for each row i
>> y - output tensor (in matrix)
>> rowNum - row number of the matrix
>> colNum - column number of the matrix
*/
__global__
void KernelLogSoftmaxComputeByColHalf(half * x, half * max, half * sum, half * y, int rowNum, int colNum)
{
    __shared__ half inputSum[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ half inputMax[MAX_CUDA_THREAD_NUM_PER_BLOCK];

    int i = blockDim.y * blockIdx.y + threadIdx.y;
    int j = blockDim.x * blockIdx.x + threadIdx.x;

    /* we keep the sum and max number in the shared memory for each row */
    if (threadIdx.x == 0) {
        inputSum[threadIdx.y] = sum[i];
        inputMax[threadIdx.y] = max[i];
    }

    /* synchronize to make sure the values of max and sum are loaded */
    __syncthreads();

    /* y_{i,j} = log(e^(s_{i,j} - max_{i}) / \sum_{k} e^{s_{i,k} - max_{i}}) */
    if (i < rowNum && j < colNum) {
        int key = i * colNum + j;
        half r = hlog(hexp(x[key] - inputMax[threadIdx.y]) / inputSum[threadIdx.y]);
        y[key] = r;
    }
}

/*
int precision log softmax forward computation (Cuda kernel)
for each row i, let y_{i,j} and x_{i,j} be the output and state value for
the j-th element of row i. We have
y_{i,j} = log(e^{x_{i,j}} / \sum_{j} e^{x_{i,j}})
>> x - input tensor (in matrix)
>> max - the max value for each row i
>> sum - \sum_{j} e^{x_{i,j}} for each row i
>> y - output tensor (in matrix)
>> rowNum - row number of the matrix
>> colNum - column number of the matrix
*/
__global__
void KernelLogSoftmaxComputeByColINT(int * x, int * max, int * sum, int * y, int rowNum, int colNum)
{
    __shared__ int inputSum[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ int inputMax[MAX_CUDA_THREAD_NUM_PER_BLOCK];

    int i = blockDim.y * blockIdx.y + threadIdx.y;
    int j = blockDim.x * blockIdx.x + threadIdx.x;

    /* we keep the sum and max number in the shared memory for each row */
    if (threadIdx.x == 0) {
        inputSum[threadIdx.y] = sum[i];
        inputMax[threadIdx.y] = max[i];
    }

    /* synchronize to make sure the values of max and sum are loaded */
    __syncthreads();

    /* y_{i,j} = log(e^(s_{i,j} - max_{i}) / \sum_{k} e^{s_{i,k} - max_{i}}) */
    if (i < rowNum && j < colNum) {
        int key = i * colNum + j;
        /* problem */
        // half r = hlog(hexp(x[key] - inputMax[threadIdx.y]) / inputSum[threadIdx.y]); //cuda_fp16 line:1790 ...
        // original, log (exp)
        // int r = hlog(hexp(x[key] - inputMax[threadIdx.x]));
        //int value = x[key] - inputMax[threadIdx.y];
        y[key] = x[key] - inputMax[threadIdx.y] - 10;
        // FUNCTION_INT(&value, &inputSum[threadIdx.y]);

        /* exp() Taylor expansion */
        /* for speed, directly use register values */
        /* very important: the leading 1 is omitted here because it is subtracted later */
        /*int toMul = 1, sum = 1;
        toMul *= value; sum += toMul / 1;
        toMul *= value; sum += toMul / 2;
        toMul *= value; sum += toMul / 6;*/
        /* toMul *= value; sum += toMul / 24; */
        /* toMul *= value; sum += toMul / 120; */
        /* ... */

        /* to ensure the int can hold the value, scale it up */
        /*sum *= COEFFICIENT_FUNCTION_INT;
        sum /= inputSum[threadIdx.y]; */

        /* log() Taylor expansion (in C++, log means ln()) */
        /*value = sum - 1;
        sum = 0; toMul = 1;
        toMul *= value; sum += value;
        toMul *= -value; sum += toMul / 2;
        toMul *= -value; sum += toMul / 3;*/
        /* ... */

        /* subtract the shift that multiplying by the coefficient introduces */
        // sum -= LN_COEFFICIENT_FUNCTION_INT;
        /* the argument here is larger than 1, so the log is useless */
        /* here we change the function into a - x */
    }
}

__global__
void KernelLogSoftmaxComputeByColFloatTest(DTYPE * x, DTYPE * max, DTYPE * sum, DTYPE * y, int rowNum, int colNum)
{
    __shared__ DTYPE inputSum[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ DTYPE inputMax[MAX_CUDA_THREAD_NUM_PER_BLOCK];

    int i = blockDim.y * blockIdx.y + threadIdx.y;
    int j = blockDim.x * blockIdx.x + threadIdx.x;

    /* we keep the sum and max number in the shared memory for each row */
    if (threadIdx.x == 0) {
        inputSum[threadIdx.y] = sum[i];
        inputMax[threadIdx.y] = max[i];
    }

    /* synchronize to make sure the values of max and sum are loaded */
    __syncthreads();

    /* y_{i,j} = log(e^(s_{i,j} - max_{i}) / \sum_{k} e^{s_{i,k} - max_{i}}) */
    if (i < rowNum && j < colNum) {
        int key = i * colNum + j;
        DTYPE value = x[key] - inputMax[threadIdx.y];

        /* exp() Taylor expansion */
        /* for speed, directly use register values */
        DTYPE sum = exp(value);
        /*DTYPE toMul = 1.0, sum = 1.0;
        toMul *= value; sum += toMul / 1.0;
        toMul *= value; sum += toMul / 2.0;
        toMul *= value; sum += toMul / 6.0;*/
        /* toMul *= value; sum += toMul / 24; */
        /* toMul *= value; sum += toMul / 120; */
        /* ... */

        value = sum / inputSum[threadIdx.y];

        /* log() Taylor expansion (in C++, log means ln()) */
        /*value = sum - 1;
        sum = 0; toMul = 1;
        toMul *= value; sum += value;
        toMul *= -value; sum += toMul / 2;
        toMul *= -value; sum += toMul / 3;
        value = sum;*/
        value = log(value);
        /* problem: the log could be expanded for testing as well */
        //DTYPE r = log(exp(x[key] - inputMax[threadIdx.x]) / inputSum[threadIdx.x]);

        if (isnan(value)) value = LOGPROB_MIN;
        if (isinf(value)) value = LOGPROB_MIN;

        y[key] = MAX(value, LOGPROB_MIN);
    }
}

/*
log scale softmax y = log(e^x / \sum_{i} e^{x_i}) (Cuda version)
>> x - input vector
>> y - result
>> leadDim - leading dimension (along which we perform reduction)
>> sum - \sum_{i} e^{x_i}
>> max - \max_{i} e^{x_i}
*/
void _CudaLogSoftmaxSumMax(XTensor * x, XTensor * y, int leadDim, XTensor * sum, XTensor * max)
{
    CheckNTErrors((x->devID >= 0), "Forward computation of log softmax must be run on GPUs.");
    CheckNTErrors((x->devID == y->devID), "Input tensors must be on the same GPU.");
    CheckNTErrors((x->order == y->order), "Input tensors must be of the same size.");
    CheckNTErrors((x->order == 2), "Input tensors must be of order 2.");

    int devIDBackup;
    ProtectCudaDev(x->devID, devIDBackup);

    if (x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE) {
        int gridSize[3], blockSize[3];
        int n = x->dimSize[0];
        int m = x->dimSize[1];

        /* allocate the buffer */
        DTYPE * maxData = (DTYPE*)max->data;
        DTYPE * sumData = (DTYPE*)sum->data;

        if (leadDim == 0) {
            GDevs.GetCudaThread2D(x->devID, n, m, MAX_INT, gridSize, blockSize);
            /* y_{i,j} = log(e^(s_{i,j} - max_{j}) / \sum_{k} e^{s_{k,j} - max_{j}}) */
            KernelLogSoftmaxComputeByRow<<<dim3(gridSize[1], gridSize[0]), dim3(blockSize[1], blockSize[0])>>>
                                        ((DTYPE*)x->data, maxData, sumData, (DTYPE*)y->data, n, m);
        }
        else {
            GDevs.GetCudaThread2D(x->devID, m, n, MAX_INT, gridSize, blockSize);
            /* y_{i,j} = log(e^(s_{i,j} - max_{i}) / \sum_{k} e^{s_{i,k} - max_{i}}) */
            KernelLogSoftmaxComputeByCol<<<dim3(gridSize[0], gridSize[1]), dim3(blockSize[0], blockSize[1])>>>
                                        ((DTYPE*)x->data, maxData, sumData, (DTYPE*)y->data, n, m);
        }
    }
    else if (x->dataType == X_FLOAT16 && y->dataType == X_FLOAT16) {
        int gridSize[3], blockSize[3];
        int n = x->dimSize[0];
        int m = x->dimSize[1];

        /* allocate the buffer */
        __half * maxData = (half*)max->data;
        __half * sumData = (half*)sum->data;

        if (leadDim == 0) {
            GDevs.GetCudaThread2D(x->devID, n, m, MAX_INT, gridSize, blockSize);
            /* y_{i,j} = log(e^(s_{i,j} - max_{j}) / \sum_{k} e^{s_{k,j} - max_{j}}) */
            /* int has been modified, into the int version */
            KernelLogSoftmaxComputeByRowHalf<<<dim3(gridSize[1], gridSize[0]), dim3(blockSize[1], blockSize[0])>>>
                                            ((half*)x->data, maxData, sumData, (half *)y->data, n, m);
        }
        else {
            GDevs.GetCudaThread2D(x->devID, m, n, MAX_INT, gridSize, blockSize);
            /* y_{i,j} = log(e^(s_{i,j} - max_{i}) / \sum_{k} e^{s_{i,k} - max_{i}}) */
            KernelLogSoftmaxComputeByColHalf<<<dim3(gridSize[0], gridSize[1]), dim3(blockSize[0], blockSize[1])>>>
                                            ((half*)x->data, maxData, sumData, (half*)y->data, n, m);
        }
    }
    else if (x->dataType == X_INT && y->dataType == X_INT) {
        int gridSize[3], blockSize[3];
        int n = x->dimSize[0];
        int m = x->dimSize[1];

        /* allocate the buffer */
        int * maxData = (int *)max->data;
        int * sumData = (int *)sum->data;

        if (leadDim == 0) {
            GDevs.GetCudaThread2D(x->devID, n, m, MAX_INT, gridSize, blockSize);
            /* y_{i,j} = log(e^(s_{i,j} - max_{j}) / \sum_{k} e^{s_{k,j} - max_{j}}) */
            KernelLogSoftmaxComputeByRowINT<<<dim3(gridSize[1], gridSize[0]), dim3(blockSize[1], blockSize[0])>>>
                                           ((int *)x->data, maxData, sumData, (int *)y->data, n, m);
        }
        else {
            GDevs.GetCudaThread2D(x->devID, m, n, MAX_INT, gridSize, blockSize);
            /* y_{i,j} = log(e^(s_{i,j} - max_{i}) / \sum_{k} e^{s_{i,k} - max_{i}}) */
            KernelLogSoftmaxComputeByColINT<<<dim3(gridSize[0], gridSize[1]), dim3(blockSize[0], blockSize[1])>>>
                                           ((int *)x->data, maxData, sumData, (int *)y->data, n, m);
        }
    }
    else {
        ShowNTErrors("TODO!");
    }

    BacktoCudaDev(x->devID, devIDBackup);
}

void _CudaLogSoftmaxSumMaxFloatTest(XTensor * x, XTensor * y, int leadDim, XTensor * sum, XTensor * max)
{
    CheckNTErrors((x->devID >= 0), "Forward computation of log softmax must be run on GPUs.");
    CheckNTErrors((x->devID == y->devID), "Input tensors must be on the same GPU.");
    CheckNTErrors((x->order == y->order), "Input tensors must be of the same size.");
    CheckNTErrors((x->order == 2), "Input tensors must be of order 2.");

    int devIDBackup;
    ProtectCudaDev(x->devID, devIDBackup);

    if (x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE) {
        int gridSize[3], blockSize[3];
        int n = x->dimSize[0];
        int m = x->dimSize[1];

        /* allocate the buffer */
        DTYPE * maxData = (DTYPE *)max->data;
        DTYPE * sumData = (DTYPE *)sum->data;

        if (leadDim == 0) {
            GDevs.GetCudaThread2D(x->devID, n, m, MAX_INT, gridSize, blockSize);
            /* y_{i,j} = log(e^(s_{i,j} - max_{j}) / \sum_{k} e^{s_{k,j} - max_{j}}) */
            KernelLogSoftmaxComputeByRowFloatTest<<<dim3(gridSize[1], gridSize[0]), dim3(blockSize[1], blockSize[0])>>>
                                                 ((DTYPE*)x->data, maxData, sumData, (DTYPE*)y->data, n, m);
        }
        else {
            GDevs.GetCudaThread2D(x->devID, m, n, MAX_INT, gridSize, blockSize);
            /* y_{i,j} = log(e^(s_{i,j} - max_{i}) / \sum_{k} e^{s_{i,k} - max_{i}}) */
            KernelLogSoftmaxComputeByColFloatTest<<<dim3(gridSize[0], gridSize[1]), dim3(blockSize[0], blockSize[1])>>>
                                                 ((DTYPE*)x->data, maxData, sumData, (DTYPE*)y->data, n, m);
        }
    }
    else {
        ShowNTErrors("TODO!");
    }

    BacktoCudaDev(x->devID, devIDBackup);
}

/*
set dE/dx = exp(y)
>> dedy - dE/dy
>> dedx - dE/dx
>> y - output of the function
>> size - size of output
>> lossName - name of the loss function
*/
__global__
void KernelExpLoss(DTYPE * dedy, DTYPE * dedx, DTYPE * y, int size, LOSS_FUNCTION_NAME lossName)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;

    if (i < size) {
        /* dE/dx_j = exp(y_j) */
        if (lossName == CROSSENTROPY)
            dedx[i] = exp(y[i]);
        /* dE/dx_j = exp(y_j) */
        else if (lossName == SQUAREDERROR)
            dedx[i] = exp(y[i]);
        else if (lossName == ONEHOTERROR)
            dedx[i] = 0;
        else
            dedx[i] = 0;
    }
}

/*
backward computation for log softmax
dE/dx = dE/dy * dy/dx
>> dedy - dE/dy
>> dedx - dE/dx
>> gold - gold standard to measure error (or loss)
>> y - output of the function
>> x - input of the function
>> size - size of input/output
>> lossName - name of the loss function
*/
__global__
void KernelLogSoftmaxBackwardDEDS(DTYPE * dedy, DTYPE * dedx, DTYPE * gold, DTYPE * y, DTYPE * x,
                                  int size, LOSS_FUNCTION_NAME lossName)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;

    if (i < size) {
        DTYPE r = 0;

        /* dE/ds_j = exp(y_j) */
        if (lossName == CROSSENTROPY)
            r = -gold[i] + exp(y[i]); /* origin */
        /* r = -gold[i]; here to change */
        /* dE/ds_j = exp(y_j) */
        else if (lossName == SQUAREDERROR)
            r = -gold[i] + exp(y[i]);
        else if (lossName == ONEHOTERROR) {
            if (gold[i] == 1.0F)
                r = -gold[i] + exp(y[i]);
            else
                r = 0;
        }
        else {
            r = dedy[i];
        }

        if (isnan(r)) r = 0;
        if (isinf(r)) r = 0;

        dedx[i] = r;
    }
}

/*
backward computation for log softmax (sparse matrices)
for each column dE/dx_j += -gold_j (for dE/dx = dE/dy * dy/dx)
>> dedy - dE/dy
>> dedx - dE/dx
>> gold - gold standard to measure error (or loss)
>> y - output of the function
>> x - input of the function
>> rowNum - row number of the matrix
>> colNum - column number of the matrix
>> gNonZeroNum -
>> lossName - name of the loss function
*/
__global__
void KernelLogSoftmaxBackwardDEDSSparseByRow(DTYPE * dedy, DTYPE * dedx, void * gold, DTYPE * y, DTYPE * x,
                                             int rowNum, int colNum, int gNonZeroNum, LOSS_FUNCTION_NAME lossName)
{
    int tupleSize = sizeof(int) + sizeof(DTYPE);
    int k = blockDim.x * blockIdx.x + threadIdx.x;

    if (k < gNonZeroNum) {
        /* load the sub-block of the sparse matrix b */
        int key = *(int*)((char*)gold + tupleSize * k);
        int ni = key / colNum;
        int mi = key % colNum;
        DTYPE value = *(DTYPE*)((char*)gold + tupleSize * k + sizeof(int));

        if (lossName == CROSSENTROPY)
            dedx[colNum * ni + mi] += -value;
        else if (lossName == SQUAREDERROR)
            dedx[colNum * ni + mi] += -value;
        else if (lossName == ONEHOTERROR) {
            int offset = colNum * ni + mi;
            if (value == 1.0F)
                dedx[offset] += (-value + exp(y[offset]));
            //dedx[offset] += -value * 0.005;
        }
    }
}

/*
backward computation for dense matrices with the default data type

dE/dx = dE/dy * dy/dx

log softmax: y_i = log(e^{x_i} / \sum_{k} e^{x_k})

dy_i/dx_j = d{log(e^{x_i} / \sum_{k} e^{x_k})}/dx_j
          = d{log(e^{x_i})}/dx_j - d{log(\sum_{k} e^{x_k})}/dx_j
          = \delta(i,j) - e^{x_j} / \sum_{k} e^{x_k}
          = \delta(i,j) - exp(y_j)

where \delta(i,j) = 1 if i = j, and \delta(i,j) = 0 otherwise.

if the loss E is defined as cross entropy, i.e., E = -\sum_{k} (gold_k * y_k),
we have dE/dy_i = -gold_i (where {gold_k} is the gold standard distribution).
then

dE/dx_j = \sum_{i} {dE/dy_i * dy_i/dx_j}
        = \sum_{i} {-gold_i * (\delta(i,j) - exp(y_j))}
        = \sum_{i} {-gold_i * \delta(i,j)} + \sum_{i} {gold_i * exp(y_j)}
        = -gold_j + \sum_{i} {gold_i * exp(y_j)}
        = -gold_j + exp(y_j)

Note: gold_i is a distribution, i.e., \sum_{i} gold_i = 1.

if gold has a one-hot representation (gold_i = 1 for only one dimension),
we can reformulate it as dE/dx_j = -\delta(i,j) + exp(y_j).

There are two ways to implement this process.
Method 1. we compute dE/dy and dy/dx respectively, and then reach dE/dx by
dE/dx = dE/dy * dy/dx (or more precisely dE/dx_j = \sum_{i} {dE/dy_i * dy_i/dx_j}).
Method 2. we compute dE/dx (or dE/dx_j) in a single step, rather than
resorting to the sub-models dE/dy and dy/dx. We can do this by using
dE/dx_j = -gold_j + exp(y_j).

Here we choose Method 2, i.e., we straightforwardly compute dE/dx_j by
dE/dx_j = -gold_j + exp(y_j)
(or dE/dx_j = -\delta(i,j) + exp(y_j) for a Maximum A Posteriori Estimation (MAP)).
Method 1 is also fine, but it is more time consuming due to the summation over
dimensions.

Note that this method is not good for the standard version of softmax when
working with the cross entropy loss, because it is numerically unstable.
When we use the usual definition of softmax, y_i = e^{x_i} / \sum_{k} e^{x_k},
it is trivial to see that dy_i/dx_j = y_i * \delta(i,j) - y_i * y_j.
As y_i and y_j can be small numbers, y_i * y_j can be much smaller, with a
risk of losing precision. This is even worse when we multiply dy_i/dx_j by
dE/dy_i. So it is in general better to use log softmax instead, for better
numerical stability.

>> gold - gold standard to measure error (or loss)
>> y - output of the function
>> x - input of the function
>> dedy - dE/dy
>> deds - dE/dx
>> lossName - type of loss function, e.g., cross entropy
>> leadDim - leading dimension (along which we perform reduction)
*/
void _CudaLogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
                             XTensor * dedy, XTensor * dedx, XTensor * padding,
                             int leadDim, LOSS_FUNCTION_NAME lossName)
{
    leadDim = leadDim < 0 ? y->order - 1 : leadDim;

    CheckNTErrors((x->devID >= 0), "Backward computation of log softmax must be run on GPUs.");
    CheckNTErrors((x->devID == y->devID && gold->devID == y->devID),
                  "Tensors used in log softmax are not on the same GPU.");
    CheckNTErrors((gold != NULL), "No x gold standard is found!");

    int leadDimRDI = y->order - leadDim - 1;
    int dimensionSize = y->dimSizeRDI[leadDimRDI];
    int stride = 1;
    int blockSize = 1;
    int blockNum = 1;

    for (int i = 0; i < leadDimRDI; i++)
        stride *= y->dimSizeRDI[i];
    blockSize = stride * dimensionSize;
    blockNum = y->unitNum / blockSize;

    int devIDBackup;
    ProtectCudaDev(x->devID, devIDBackup);

    if (x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE) {
        CheckNTErrors((lossName == CROSSENTROPY || lossName == SQUAREDERROR || lossName == NOLOSS),
                      "Unknown loss function.");

        int cudaGridSize[3], cudaBlockSize[3];

        if (lossName == CROSSENTROPY || lossName == SQUAREDERROR) {
            if (gold->isSparse) {
                CheckNTErrors((gold->order == 2), "TODO!")
                CheckNTErrors((leadDim == 0), "TODO!");

                GDevs.GetCudaThread(x->devID, x->unitNum, cudaGridSize, cudaBlockSize);

                /* dE/ds_j = exp(y_j) */
                hipLaunchKernelGGL((KernelExpLoss), dim3(cudaGridSize[0]), dim3(cudaBlockSize[0]), 0, 0,
                    NULL, (DTYPE*)dedx->data, (DTYPE*)y->data, dimensionSize * stride, lossName);

                GDevs.GetCudaThread(x->devID, gold->unitNumNonZero, cudaGridSize, cudaBlockSize);

                /* dE/ds_j += -gold_j */
                hipLaunchKernelGGL((KernelLogSoftmaxBackwardDEDSSparseByRow),
                    dim3(cudaGridSize[0]), dim3(cudaBlockSize[0]), 0, 0,
                    NULL, (DTYPE*)dedx->data, (char*)gold->data + sizeof(int),
                    (DTYPE*)y->data, (DTYPE*)x->data,
                    dedx->dimSize[0], dedx->dimSize[1], gold->unitNumNonZero, lossName);
            }
            else {
                CheckNTErrors((XTensor::IsSameShaped(gold, y)), "The tensors must be of the same size!");

                for (int k = 0; k < blockNum; k++) {
                    GDevs.GetCudaThread(x->devID, blockSize, cudaGridSize, cudaBlockSize);

                    /* change inside here */
                    /* dE/ds_j = -gold_j + exp(y_j) */
                    hipLaunchKernelGGL((KernelLogSoftmaxBackwardDEDS),
                        dim3(cudaGridSize[0]), dim3(cudaBlockSize[0]), 0, 0,
                        NULL,
                        (DTYPE*)dedx->data + k * blockSize, (DTYPE*)gold->data + k * blockSize,
                        (DTYPE*)y->data + k * blockSize, (DTYPE*)x->data + k * blockSize,
                        dimensionSize * stride, lossName);
                }
            }

            if (padding != NULL) {
                int n = leadDim;
                int paddingOrder = padding->order;
                int * paddingDims = new int[paddingOrder];
                memcpy(paddingDims, padding->dimSize, padding->order * sizeof(int));
                padding->Reshape(padding->unitNum);

                int order = dedx->order;
                int * dims = new int[order];
                memcpy(dims, dedx->dimSize, dedx->order * sizeof(int));
                dedx->Reshape(dedx->unitNum / dedx->GetDim(n), dedx->GetDim(n));
                _MultiplyDimMe(dedx, padding, 0);

                padding->Reshape(paddingOrder, paddingDims);
                dedx->Reshape(order, dims);

                delete[] paddingDims;
                delete[] dims;
            }
        }
        else {
            ShowNTErrors("TODO!");
        }
    }
    else {
        ShowNTErrors("TODO!");
    }

    BacktoCudaDev(x->devID, devIDBackup);
}

#endif

} // namespace nts(NiuTrans.Tensor)
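The long comment above settles on Method 2, computing dE/dx_j = -gold_j + exp(y_j) directly, which is what KernelLogSoftmaxBackwardDEDS implements for cross entropy. A tiny host-only check of that identity against finite differences (the input and gold values are made up; this is a numerical sanity sketch, not NiuTrans code):

#include <cmath>
#include <cstdio>

// E(x) = -sum_i gold_i * y_i with y = log softmax(x), computed stably
// with the usual max shift.
static double crossEntropy(const double *x, const double *gold, int n)
{
    double m = x[0];
    for (int i = 1; i < n; i++) m = fmax(m, x[i]);
    double s = 0.0;
    for (int i = 0; i < n; i++) s += exp(x[i] - m);
    double e = 0.0;
    for (int i = 0; i < n; i++) e -= gold[i] * (x[i] - m - log(s));
    return e;
}

int main()
{
    const int n = 3;
    double x[n] = {0.2, -1.0, 0.5};      // hypothetical logits
    double gold[n] = {0.0, 1.0, 0.0};    // hypothetical one-hot gold
    double m = fmax(fmax(x[0], x[1]), x[2]);
    double s = exp(x[0] - m) + exp(x[1] - m) + exp(x[2] - m);
    for (int j = 0; j < n; j++) {
        double yj = x[j] - m - log(s);             // log softmax
        double analytic = -gold[j] + exp(yj);      // the kernel's formula
        double xp[n] = {x[0], x[1], x[2]};
        double xm[n] = {x[0], x[1], x[2]};
        const double eps = 1e-6;
        xp[j] += eps; xm[j] -= eps;
        double numeric = (crossEntropy(xp, gold, n) - crossEntropy(xm, gold, n)) / (2 * eps);
        printf("j=%d analytic=%.6f numeric=%.6f\n", j, analytic, numeric);
    }
    return 0;
}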
db3f362c56858cc39ecfca02880022f32bdaf2a1.cu
/* NiuTrans.Tensor - an open-source tensor library
 * Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
 * All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * $Created by: XIAO Tong (email: [email protected]) 2018-04-26
 */

#include "LogSoftmax.h"
#include "LogSoftmax.cuh"
#include "Loss.cuh"
#include "../core/arithmetic/MultiplyDim.h"
#include "../core/reduce/ReduceSum.cuh"
#include "../core/reduce/ReduceMax.cuh"
#include "../XDevice.h"
#include "device_launch_parameters.h"
#include "cuda_fp16.h"

namespace nts { // namespace nts(NiuTrans.Tensor)

#ifdef USE_CUDA

/*
log scale softmax y = log(e^x / \sum_{i} e^{x_i}) (Cuda version)
to keep the values in an int register, the function has been transformed into
a Taylor expansion, multiplied by a constant to enlarge the temporaries
>> val - x
>> inputSum - sum_{i}
note that to modify this function one should also modify the kernel functions;
they cannot call other functions, and using a define is a little difficult
*/
/*
 * $Created by: YIN FEI (email: -) 2019-01-25
 */
#define COEFFICIENT_FUNCTION_INT 1000
#define LN_COEFFICIENT_FUNCTION_INT 7 // ln 1000 = 6.9

inline void FUNCTION_INT(int * val, int * inputSum)
{
    int value = *val;

    /* exp() Taylor expansion */
    /* for speed, directly use register values */
    /* very important: the leading 1 is omitted here because it is subtracted later */
    int toMul = 1, sum = 1;
    toMul *= value; sum += toMul / 1;
    toMul *= value; sum += toMul / 2;
    toMul *= value; sum += toMul / 6;
    /* toMul *= value; sum += toMul / 24; */
    /* toMul *= value; sum += toMul / 120; */
    /* ... */

    /* to ensure the int can hold the value, scale it up */
    sum *= COEFFICIENT_FUNCTION_INT;
    sum /= (*inputSum);

    /* log() Taylor expansion (in C++, log means ln()) */
    /*value = sum - 1;
    sum = 0; toMul = 1;
    toMul *= value; sum += value;
    toMul *= -value; sum += toMul / 2;
    toMul *= -value; sum += toMul / 3;*/
    /* ... */

    /* subtract the shift that multiplying by the coefficient introduces */
    // sum -= LN_COEFFICIENT_FUNCTION_INT;
    /* the argument here is larger than 1, so the log is useless */
    /* here we change the function into a - x */
    *val = -sum;
}

/*
log scale softmax y = log(e^x / \sum_{i} e^{x_i}) (Cuda version)
>> x - input vector
>> y - result
>> leadDim - leading dimension (along which we perform reduction)
*/
void _CudaLogSoftmax(const XTensor * x, XTensor * y, int leadDim)
{
    ShowNTErrors("You should call LogSoftmax instead!");
}

/*
log softmax forward computation (Cuda kernel)
for each column j, let y_{i,j} and x_{i,j} be the output and state value for
the i-th element of column j. We have
y_{i,j} = log(e^{x_{i,j}} / \sum_{i} e^{x_{i,j}})
>> x - input tensor (in matrix)
>> max - the max value for each column j
>> sum - \sum_{i} e^{x_{i,j}} for each column j
>> y - output tensor (in matrix)
>> rowNum - row number of the matrix
>> colNum - column number of the matrix
*/
__global__
void KernelLogSoftmaxComputeByRow(DTYPE * x, DTYPE * max, DTYPE * sum, DTYPE * y, int rowNum, int colNum)
{
    __shared__ DTYPE inputSum[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ DTYPE inputMax[MAX_CUDA_THREAD_NUM_PER_BLOCK];

    int i = blockDim.y * blockIdx.y + threadIdx.y;
    int j = blockDim.x * blockIdx.x + threadIdx.x;

    /* we keep the sum and max number in the shared memory for each column */
    if (threadIdx.y == 0) {
        inputSum[threadIdx.x] = sum[j];
        inputMax[threadIdx.x] = max[j];
    }

    /* synchronize to make sure the values of max and sum are loaded */
    __syncthreads();

    /* y_{i,j} = log(e^(s_{i,j} - max_{j}) / \sum_{k} e^{s_{k,j} - max_{j}}) */
    if (i < rowNum && j < colNum) {
        int key = i * colNum + j;
        /*DTYPE value = x[key] - inputMax[threadIdx.x];
        DTYPE toMul = 1, sum = 1;
        toMul *= value; sum += toMul / 1.0;
        toMul *= value; sum += toMul / 2.0;
        toMul *= value; sum += toMul / 6.0;
        DTYPE r = log(sum / inputSum[threadIdx.x]);*/
        //DTYPE r = log(exp(x[key] - inputMax[threadIdx.x]) / inputSum[threadIdx.x]);
        DTYPE r = log(pow((float)2.0, x[key] - inputMax[threadIdx.x]) / inputSum[threadIdx.x]);

        if (isnan(r)) r = LOGPROB_MIN;
        if (isinf(r)) r = LOGPROB_MIN;

        y[key] = MAX(r, LOGPROB_MIN);
    }
}

/*
half precision log softmax forward computation (Cuda kernel)
for each column j, let y_{i,j} and x_{i,j} be the output and state value for
the i-th element of column j. We have
y_{i,j} = log(e^{x_{i,j}} / \sum_{i} e^{x_{i,j}})
>> x - input tensor (in matrix)
>> max - the max value for each column j
>> sum - \sum_{i} e^{x_{i,j}} for each column j
>> y - output tensor (in matrix)
>> rowNum - row number of the matrix
>> colNum - column number of the matrix
*/
__global__
void KernelLogSoftmaxComputeByRowHalf(half * x, half * max, half * sum, half * y, int rowNum, int colNum)
{
    __shared__ half inputSum[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ half inputMax[MAX_CUDA_THREAD_NUM_PER_BLOCK];

    int i = blockDim.y * blockIdx.y + threadIdx.y;
    int j = blockDim.x * blockIdx.x + threadIdx.x;

    /* we keep the sum and max number in the shared memory for each column */
    if (threadIdx.y == 0) {
        inputSum[threadIdx.x] = sum[j];
        inputMax[threadIdx.x] = max[j];
    }

    /* synchronize to make sure the values of max and sum are loaded */
    __syncthreads();

    /* y_{i,j} = log(e^(s_{i,j} - max_{j}) / \sum_{k} e^{s_{k,j} - max_{j}}) */
    if (i < rowNum && j < colNum) {
        int key = i * colNum + j;
        //half r = hlog(hexp(x[key] - inputMax[threadIdx.x]) / inputSum[threadIdx.x]); //cuda_fp16 line:1790 ...
        half r = hlog(hexp(x[key] - inputMax[threadIdx.x]));
        y[key] = r;
    }
}

/*
int precision log softmax forward computation (Cuda kernel)
for each column j, let y_{i,j} and x_{i,j} be the output and state value for
the i-th element of column j. We have
y_{i,j} = log(e^{x_{i,j}} / \sum_{i} e^{x_{i,j}})
>> x - input tensor (in matrix)
>> max - the max value for each column j
>> sum - \sum_{i} e^{x_{i,j}} for each column j
>> y - output tensor (in matrix)
>> rowNum - row number of the matrix
>> colNum - column number of the matrix
*/
__global__
void KernelLogSoftmaxComputeByRowINT(int * x, int * max, int * sum, int * y, int rowNum, int colNum)
{
    __shared__ int inputSum[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ int inputMax[MAX_CUDA_THREAD_NUM_PER_BLOCK];

    int i = blockDim.y * blockIdx.y + threadIdx.y;
    int j = blockDim.x * blockIdx.x + threadIdx.x;

    /* we keep the sum and max number in the shared memory for each column */
    if (threadIdx.y == 0) {
        inputSum[threadIdx.x] = sum[j];
        inputMax[threadIdx.x] = max[j];
    }

    /* synchronize to make sure the values of max and sum are loaded */
    __syncthreads();

    /* y_{i,j} = log(e^(s_{i,j} - max_{j}) / \sum_{k} e^{s_{k,j} - max_{j}}) */
    if (i < rowNum && j < colNum) {
        int key = i * colNum + j;
        //half r = hlog(hexp(x[key] - inputMax[threadIdx.x]) / inputSum[threadIdx.x]); //cuda_fp16 line:1790 ...
        // original, log (exp)
        // int r = hlog(hexp(x[key] - inputMax[threadIdx.x]));
        //int value = x[key] - inputMax[threadIdx.x];
        y[key] = x[key] - inputMax[threadIdx.x] - 10;
        //FUNCTION_INT(&value, &inputSum[threadIdx.x]);

        /* exp() Taylor expansion */
        /* for speed, directly use register values */
        /* very important: the leading 1 is omitted here because it is subtracted later */
        /*int toMul = 1, sum = 1;
        toMul *= value; sum += toMul / 1;
        toMul *= value; sum += toMul / 2;
        toMul *= value; sum += toMul / 6;*/
        /* toMul *= value; sum += toMul / 24; */
        /* toMul *= value; sum += toMul / 120; */
        /* ... */

        /* to ensure the int can hold the value, scale it up */
        /*sum *= COEFFICIENT_FUNCTION_INT;
        sum /= inputSum[threadIdx.x];*/

        /* log() Taylor expansion (in C++, log means ln()) */
        /*value = sum - 1;
        sum = 0; toMul = 1;
        toMul *= value; sum += value;
        toMul *= -value; sum += toMul / 2;
        toMul *= -value; sum += toMul / 3;*/
        /* ... */

        /* subtract the shift that multiplying by the coefficient introduces */
        // sum -= LN_COEFFICIENT_FUNCTION_INT;
        /* the argument here is larger than 1, so the log is useless */
        /* here we change the function into a - x */
    }
}

__global__
void KernelLogSoftmaxComputeByRowFloatTest(DTYPE * x, DTYPE * max, DTYPE * sum, DTYPE * y, int rowNum, int colNum)
{
    __shared__ DTYPE inputSum[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ DTYPE inputMax[MAX_CUDA_THREAD_NUM_PER_BLOCK];

    int i = blockDim.y * blockIdx.y + threadIdx.y;
    int j = blockDim.x * blockIdx.x + threadIdx.x;

    /* we keep the sum and max number in the shared memory for each column */
    if (threadIdx.y == 0) {
        inputSum[threadIdx.x] = sum[j];
        inputMax[threadIdx.x] = max[j];
    }

    /* synchronize to make sure the values of max and sum are loaded */
    __syncthreads();

    /* y_{i,j} = log(e^(s_{i,j} - max_{j}) / \sum_{k} e^{s_{k,j} - max_{j}}) */
    if (i < rowNum && j < colNum) {
        int key = i * colNum + j;
        DTYPE value = x[key] - inputMax[threadIdx.x];

        /* exp() Taylor expansion */
        /* for speed, directly use register values */
        DTYPE sum = exp(value);
        /*DTYPE toMul = 1.0, sum = 1.0;
        toMul *= value; sum += toMul / 1.0;
        toMul *= value; sum += toMul / 2.0;
        toMul *= value; sum += toMul / 6.0;*/
        /* toMul *= value; sum += toMul / 24; */
        /* toMul *= value; sum += toMul / 120; */
        /* ... */

        value = sum / inputSum[threadIdx.x];

        /* log() Taylor expansion (in C++, log means ln()) */
        /*value = sum - 1;
        sum = 0; toMul = 1;
        toMul *= value; sum += value;
        toMul *= -value; sum += toMul / 2;
        toMul *= -value; sum += toMul / 3;
        value = sum;*/
        value = log(value);
        /* problem: the log could be expanded for testing as well */
        //DTYPE r = log(exp(x[key] - inputMax[threadIdx.x]) / inputSum[threadIdx.x]);

        if (isnan(value)) value = LOGPROB_MIN;
        if (isinf(value)) value = LOGPROB_MIN;

        y[key] = MAX(value, LOGPROB_MIN);
    }
}

/*
log softmax forward computation (Cuda kernel)
for each row i, let y_{i,j} and x_{i,j} be the output and state value for
the j-th element of row i. We have
y_{i,j} = log(e^{x_{i,j}} / \sum_{j} e^{x_{i,j}})
>> x - input tensor (in matrix)
>> max - the max value for each row i
>> sum - \sum_{j} e^{x_{i,j}} for each row i
>> y - output tensor (in matrix)
>> rowNum - row number of the matrix
>> colNum - column number of the matrix
*/
__global__
void KernelLogSoftmaxComputeByCol(DTYPE * x, DTYPE * max, DTYPE * sum, DTYPE * y, int rowNum, int colNum)
{
    __shared__ DTYPE inputSum[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ DTYPE inputMax[MAX_CUDA_THREAD_NUM_PER_BLOCK];

    int i = blockDim.y * blockIdx.y + threadIdx.y;
    int j = blockDim.x * blockIdx.x + threadIdx.x;

    /* we keep the sum and max number in the shared memory for each row */
    if (threadIdx.x == 0) {
        inputSum[threadIdx.y] = sum[i];
        inputMax[threadIdx.y] = max[i];
    }

    /* synchronize to make sure the values of max and sum are loaded */
    __syncthreads();

    /* y_{i,j} = log(e^(s_{i,j} - max_{i}) / \sum_{k} e^{s_{i,k} - max_{i}}) */
    if (i < rowNum && j < colNum) {
        int key = i * colNum + j;
        /*DTYPE value = x[key] - inputMax[threadIdx.y];
        DTYPE toMul = 1, sum = 1;
        toMul *= value; sum += toMul / 1.0;
        toMul *= value; sum += toMul / 2.0;
        toMul *= value; sum += toMul / 6.0;
        DTYPE r = log(sum / inputSum[threadIdx.y]);*/
        // DTYPE r = log(exp(x[key] - inputMax[threadIdx.y]) / inputSum[threadIdx.y]);
        /*if ((x[key] - inputMax[threadIdx.x]) < -100.0) {
            printf("kernel col value: %.3e %.3e %.3e\n",
                   x[key], inputMax[threadIdx.y], x[key] - inputMax[threadIdx.y]);
        }*/
        //fprintf(in, "%.3f\n", x[key] - inputMax[threadIdx.x]);
        DTYPE r = log(pow((float)2.0, x[key] - inputMax[threadIdx.y]) / inputSum[threadIdx.y]);
        /*if (r < LOGPROB_MIN) {
            printf("min %e %e, %e %e, %e %e\n", r, x[key] - inputMax[threadIdx.y],
                   x[key], inputMax[threadIdx.y],
                   exp(x[key] - inputMax[threadIdx.y]), inputSum[threadIdx.y]);
        }*/

        if (isnan(r)) r = LOGPROB_MIN;
        if (isinf(r)) r = LOGPROB_MIN;

        y[key] = MAX(r, LOGPROB_MIN);
    }
}

/*
half precision log softmax forward computation (Cuda kernel)
for each row i, let y_{i,j} and x_{i,j} be the output and state value for
the j-th element of row i. We have
y_{i,j} = log(e^{x_{i,j}} / \sum_{j} e^{x_{i,j}})
>> x - input tensor (in matrix)
>> max - the max value for each row i
>> sum - \sum_{j} e^{x_{i,j}} for each row i
>> y - output tensor (in matrix)
>> rowNum - row number of the matrix
>> colNum - column number of the matrix
*/
__global__
void KernelLogSoftmaxComputeByColHalf(half * x, half * max, half * sum, half * y, int rowNum, int colNum)
{
    __shared__ half inputSum[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ half inputMax[MAX_CUDA_THREAD_NUM_PER_BLOCK];

    int i = blockDim.y * blockIdx.y + threadIdx.y;
    int j = blockDim.x * blockIdx.x + threadIdx.x;

    /* we keep the sum and max number in the shared memory for each row */
    if (threadIdx.x == 0) {
        inputSum[threadIdx.y] = sum[i];
        inputMax[threadIdx.y] = max[i];
    }

    /* synchronize to make sure the values of max and sum are loaded */
    __syncthreads();

    /* y_{i,j} = log(e^(s_{i,j} - max_{i}) / \sum_{k} e^{s_{i,k} - max_{i}}) */
    if (i < rowNum && j < colNum) {
        int key = i * colNum + j;
        half r = hlog(hexp(x[key] - inputMax[threadIdx.y]) / inputSum[threadIdx.y]);
        y[key] = r;
    }
}

/*
int precision log softmax forward computation (Cuda kernel)
for each row i, let y_{i,j} and x_{i,j} be the output and state value for
the j-th element of row i. We have
y_{i,j} = log(e^{x_{i,j}} / \sum_{j} e^{x_{i,j}})
>> x - input tensor (in matrix)
>> max - the max value for each row i
>> sum - \sum_{j} e^{x_{i,j}} for each row i
>> y - output tensor (in matrix)
>> rowNum - row number of the matrix
>> colNum - column number of the matrix
*/
__global__
void KernelLogSoftmaxComputeByColINT(int * x, int * max, int * sum, int * y, int rowNum, int colNum)
{
    __shared__ int inputSum[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ int inputMax[MAX_CUDA_THREAD_NUM_PER_BLOCK];

    int i = blockDim.y * blockIdx.y + threadIdx.y;
    int j = blockDim.x * blockIdx.x + threadIdx.x;

    /* we keep the sum and max number in the shared memory for each row */
    if (threadIdx.x == 0) {
        inputSum[threadIdx.y] = sum[i];
        inputMax[threadIdx.y] = max[i];
    }

    /* synchronize to make sure the values of max and sum are loaded */
    __syncthreads();

    /* y_{i,j} = log(e^(s_{i,j} - max_{i}) / \sum_{k} e^{s_{i,k} - max_{i}}) */
    if (i < rowNum && j < colNum) {
        int key = i * colNum + j;
        /* problem */
        // half r = hlog(hexp(x[key] - inputMax[threadIdx.y]) / inputSum[threadIdx.y]); //cuda_fp16 line:1790 ...
        // original, log (exp)
        // int r = hlog(hexp(x[key] - inputMax[threadIdx.x]));
        //int value = x[key] - inputMax[threadIdx.y];
        y[key] = x[key] - inputMax[threadIdx.y] - 10;
        // FUNCTION_INT(&value, &inputSum[threadIdx.y]);

        /* exp() Taylor expansion */
        /* for speed, directly use register values */
        /* very important: the leading 1 is omitted here because it is subtracted later */
        /*int toMul = 1, sum = 1;
        toMul *= value; sum += toMul / 1;
        toMul *= value; sum += toMul / 2;
        toMul *= value; sum += toMul / 6;*/
        /* toMul *= value; sum += toMul / 24; */
        /* toMul *= value; sum += toMul / 120; */
        /* ... */

        /* to ensure the int can hold the value, scale it up */
        /*sum *= COEFFICIENT_FUNCTION_INT;
        sum /= inputSum[threadIdx.y]; */

        /* log() Taylor expansion (in C++, log means ln()) */
        /*value = sum - 1;
        sum = 0; toMul = 1;
        toMul *= value; sum += value;
        toMul *= -value; sum += toMul / 2;
        toMul *= -value; sum += toMul / 3;*/
        /* ...
*/ /* minus the value that multiply the coefficient affects the value */ // sum -= LN_COEFFICIENT_FUNCTION_INT; /* consider here is a larger than 1, so the log is useless */ /* here change the function as a - x */ } } __global__ void KernelLogSoftmaxComputeByColFloatTest(DTYPE * x, DTYPE * max, DTYPE * sum, DTYPE * y, int rowNum, int colNum) { __shared__ DTYPE inputSum[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ DTYPE inputMax[MAX_CUDA_THREAD_NUM_PER_BLOCK]; int i = blockDim.y * blockIdx.y + threadIdx.y; int j = blockDim.x * blockIdx.x + threadIdx.x; /* we keep the sum and max number in the shared memory for each row */ if (threadIdx.x == 0) { inputSum[threadIdx.y] = sum[i]; inputMax[threadIdx.y] = max[i]; } /* synchronize to make sure the values of max and sum are loaded */ __syncthreads(); /* y_{i,j} = log(e^(s_{i,j} - max_{i}) / \sum_{k} e^{s_{i,k} - max_{i}}) */ if (i < rowNum && j < colNum) { int key = i * colNum + j; DTYPE value = x[key] - inputMax[threadIdx.y]; /* exp() taylor extension */ /* for the speed, directly use the register number */ DTYPE sum = exp(value); /*DTYPE toMul = 1.0, sum = 1.0; toMul *= value; sum += toMul / 1.0; toMul *= value; sum += toMul / 2.0; toMul *= value; sum += toMul / 6.0;*/ /* toMul *= value; sum += toMul / 24 ; */ /* toMul *= value; sum += toMul / 120; */ /* ... */ value = sum / inputSum[threadIdx.y]; /* log() taylor extension (in C++, log means ln()) */ /*value = sum - 1; sum = 0; toMul = 1; toMul *= value; sum += value; toMul *= -value; sum += toMul / 2; toMul *= -value; sum += toMul / 3; value = sum;*/ value = log(value); /* problem log can extend to test also */ //DTYPE r = log(exp(x[key] - inputMax[threadIdx.x]) / inputSum[threadIdx.x]); if (isnan(value)) value = LOGPROB_MIN; if (isinf(value)) value = LOGPROB_MIN; y[key] = MAX(value, LOGPROB_MIN); } } /* log scale softmax y = log(e^x / \sum_{i} e^{x_i}) (Cuda version) >> x - input vector >> y - result >> leadDim - leading dimension (along which we perform reduction) >> sum - \sum_{i} e^{x_i} >> max - \max_{i} e^{x_i} */ void _CudaLogSoftmaxSumMax(XTensor * x, XTensor * y, int leadDim, XTensor * sum, XTensor * max) { CheckNTErrors((x->devID >= 0), "Forward computation of log softmax must be run on GPUs."); CheckNTErrors((x->devID == y->devID), "Input tensors must be on the same GPU."); CheckNTErrors((x->order == y->order), "Input tensors must be of the same size."); CheckNTErrors((x->order == 2), "Input tensors must be of order 2."); int devIDBackup; ProtectCudaDev(x->devID, devIDBackup); if (x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE) { int gridSize[3], blockSize[3]; int n = x->dimSize[0]; int m = x->dimSize[1]; /* allocate the buffer */ DTYPE * maxData = (DTYPE*)max->data; DTYPE * sumData = (DTYPE*)sum->data; if (leadDim == 0) { GDevs.GetCudaThread2D(x->devID, n, m, MAX_INT, gridSize, blockSize); /* y_{i,j} = log(e^(s_{i,j} - max_{j}) / \sum_{k} e^{s_{k,j} - max_{j}}) */ KernelLogSoftmaxComputeByRow << <dim3(gridSize[1], gridSize[0]), dim3(blockSize[1], blockSize[0]) >> > ((DTYPE*)x->data, maxData, sumData, (DTYPE*)y->data, n, m); } else { GDevs.GetCudaThread2D(x->devID, m, n, MAX_INT, gridSize, blockSize); /* y_{i,j} = log(e^(s_{i,j} - max_{i}) / \sum_{k} e^{s_{i,k} - max_{i}}) */ KernelLogSoftmaxComputeByCol << <dim3(gridSize[0], gridSize[1]), dim3(blockSize[0], blockSize[1]) >> > ((DTYPE*)x->data, maxData, sumData, (DTYPE*)y->data, n, m); } } else if (x->dataType == X_FLOAT16 && y->dataType == X_FLOAT16) { int gridSize[3], blockSize[3]; int n = x->dimSize[0]; int m = 
x->dimSize[1]; /* allocate the buffer */ __half * maxData = (half*)max->data; __half * sumData = (half*)sum->data; if (leadDim == 0) { GDevs.GetCudaThread2D(x->devID, n, m, MAX_INT, gridSize, blockSize); /* y_{i,j} = log(e^(s_{i,j} - max_{j}) / \sum_{k} e^{s_{k,j} - max_{j}}) */ /* int has been modified, into the int sentence */ KernelLogSoftmaxComputeByRowHalf << <dim3(gridSize[1], gridSize[0]), dim3(blockSize[1], blockSize[0]) >> > ((half*)x->data, maxData, sumData, (half *)y->data, n, m); } else { GDevs.GetCudaThread2D(x->devID, m, n, MAX_INT, gridSize, blockSize); /* y_{i,j} = log(e^(s_{i,j} - max_{i}) / \sum_{k} e^{s_{i,k} - max_{i}}) */ KernelLogSoftmaxComputeByColHalf << <dim3(gridSize[0], gridSize[1]), dim3(blockSize[0], blockSize[1]) >> > ((half*)x->data, maxData, sumData, (half*)y->data, n, m); } } else if (x->dataType == X_INT && y->dataType == X_INT) { int gridSize[3], blockSize[3]; int n = x->dimSize[0]; int m = x->dimSize[1]; /* allocate the buffer */ int * maxData = (int *)max->data; int * sumData = (int *)sum->data; if (leadDim == 0) { GDevs.GetCudaThread2D(x->devID, n, m, MAX_INT, gridSize, blockSize); /* y_{i,j} = log(e^(s_{i,j} - max_{j}) / \sum_{k} e^{s_{k,j} - max_{j}}) */ KernelLogSoftmaxComputeByRowINT << <dim3(gridSize[1], gridSize[0]), dim3(blockSize[1], blockSize[0]) >> > ((int *)x->data, maxData, sumData, (int *)y->data, n, m); } else { GDevs.GetCudaThread2D(x->devID, m, n, MAX_INT, gridSize, blockSize); /* y_{i,j} = log(e^(s_{i,j} - max_{i}) / \sum_{k} e^{s_{i,k} - max_{i}}) */ KernelLogSoftmaxComputeByColINT << <dim3(gridSize[0], gridSize[1]), dim3(blockSize[0], blockSize[1]) >> > ((int *)x->data, maxData, sumData, (int *)y->data, n, m); } } else { ShowNTErrors("TODO!"); } BacktoCudaDev(x->devID, devIDBackup); } void _CudaLogSoftmaxSumMaxFloatTest(XTensor * x, XTensor * y, int leadDim, XTensor * sum, XTensor * max) { CheckNTErrors((x->devID >= 0), "Forward computation of log softmax must be run on GPUs."); CheckNTErrors((x->devID == y->devID), "Input tensors must be on the same GPU."); CheckNTErrors((x->order == y->order), "Input tensors must be of the same size."); CheckNTErrors((x->order == 2), "Input tensors must be of order 2."); int devIDBackup; ProtectCudaDev(x->devID, devIDBackup); if (x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE) { int gridSize[3], blockSize[3]; int n = x->dimSize[0]; int m = x->dimSize[1]; /* allocate the buffer */ DTYPE * maxData = (DTYPE *)max->data; DTYPE * sumData = (DTYPE *)sum->data; if (leadDim == 0) { GDevs.GetCudaThread2D(x->devID, n, m, MAX_INT, gridSize, blockSize); /* y_{i,j} = log(e^(s_{i,j} - max_{j}) / \sum_{k} e^{s_{k,j} - max_{j}}) */ KernelLogSoftmaxComputeByRowFloatTest << <dim3(gridSize[1], gridSize[0]), dim3(blockSize[1], blockSize[0]) >> > ((DTYPE*)x->data, maxData, sumData, (DTYPE*)y->data, n, m); } else { GDevs.GetCudaThread2D(x->devID, m, n, MAX_INT, gridSize, blockSize); /* y_{i,j} = log(e^(s_{i,j} - max_{i}) / \sum_{k} e^{s_{i,k} - max_{i}}) */ KernelLogSoftmaxComputeByColFloatTest << <dim3(gridSize[0], gridSize[1]), dim3(blockSize[0], blockSize[1]) >> > ((DTYPE*)x->data, maxData, sumData, (DTYPE*)y->data, n, m); } } else { ShowNTErrors("TODO!"); } BacktoCudaDev(x->devID, devIDBackup); } /* set dE/dx = exp(y) >> dedy - dE/dy >> dedx - dE/dx >> y - output of the function >> size - size of output >> lossName - name of the loss function */ __global__ void KernelExpLoss(DTYPE * dedy, DTYPE * dedx, DTYPE * y, int size, LOSS_FUNCTION_NAME lossName) { int i = blockDim.x * blockIdx.x + threadIdx.x; if 
(i < size) { /* dE/dx_j = exp(y_j) */ if (lossName == CROSSENTROPY) dedx[i] = exp(y[i]); /* dE/dx_j = exp(y_j) */ else if (lossName == SQUAREDERROR) dedx[i] = exp(y[i]); else if (lossName == ONEHOTERROR) dedx[i] = 0; else dedx[i] = 0; } } /* backward computation for log softmax dE/dx = dE/dy * dy/dx >> dedy - dE/dy >> dedx - dE/dx >> gold - gold standard to measure error (or loss) >> y - output of the function >> x - input of the function >> size - size of input/output >> lossName - name of the loss function */ __global__ void KernelLogSoftmaxBackwardDEDS(DTYPE * dedy, DTYPE * dedx, DTYPE * gold, DTYPE * y, DTYPE * x, int size, LOSS_FUNCTION_NAME lossName) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < size) { DTYPE r = 0; /* dE/ds_j = exp(y_j) */ if (lossName == CROSSENTROPY) r = -gold[i] + exp(y[i]); /* origin *//* */ /* r = -gold[i]; /* here to change */ /* dE/ds_j = exp(y_j) */ else if (lossName == SQUAREDERROR) r = -gold[i] + exp(y[i]); else if (lossName == ONEHOTERROR) { if (gold[i] == 1.0F) r = -gold[i] + exp(y[i]); else r = 0; } else { r = dedy[i]; } if (isnan(r)) r = 0; if (isinf(r)) r = 0; dedx[i] = r; } } /* backward computation for log softmax (sparse matrices) for each column dE/dx_j += -gold_j (for dE/dx = dE/dy * dy/dx) >> dedy - dE/dy >> dedx - dE/dx >> gold - gold standard to measure error (or loss) >> y - output of the function >> x - input of the function >> rowNum - row number of the matrix >> colNum - column number of the matrix >> gNonZeroNum - >> lossName - name of the loss function */ __global__ void KernelLogSoftmaxBackwardDEDSSparseByRow(DTYPE * dedy, DTYPE * dedx, void * gold, DTYPE * y, DTYPE * x, int rowNum, int colNum, int gNonZeroNum, LOSS_FUNCTION_NAME lossName) { int tupleSize = sizeof(int) + sizeof(DTYPE); int k = blockDim.x * blockIdx.x + threadIdx.x; if (k < gNonZeroNum) { /* load the sub-block of the sparse matrix b */ int key = *(int*)((char*)gold + tupleSize * k); int ni = key / colNum; int mi = key % colNum; int value = *(DTYPE*)((char*)gold + tupleSize * k + sizeof(int)); if (lossName == CROSSENTROPY) dedx[colNum * ni + mi] += -value; else if (lossName == SQUAREDERROR) dedx[colNum * ni + mi] += -value; else if (lossName == ONEHOTERROR) { int offset = colNum * ni + mi; if (value == 1.0F) dedx[offset] += (-value + exp(y[offset])); //dedx[offset] += -value * 0.005; } } } /* backward computation for dense matrics with default data type dE/dx = dE/dy * dy/dx log softmax: y_i = log(e^{x_i} / \sum_{k} e^{x_k}) dy_i/dx_j = d{log(e^{x_i} / \sum_{k} e^{x_k})}/dx_j = d{log(e^{x_i})}/dx_j - d{log(\sum_{k} e^{x_k})}/dx_j = \delta(i,j) - e^{x_j}/\sum_{k} e^{x_k}) = \delta(i,j) - exp(y_j) where \delta(i,j) = 1 if i = j, and \delta(i,j) = 0 otherwise if loss E is defined as cross entropy, i.e., E = -\sum_{k} (gold_k * y_k), we have dE/dy_i = -gold_i (where {gold_k} is the gold standard distribution) then dE/dx_j = \sum_{i} {dE/dy_i * dy_i/dx_j} = \sum_{i} {-gold_i * (\delta(i,j) - exp(y_j))} = \sum_{i} {-gold_i * \delta{i,j)} + \sum_{i} {gold_i * exp(y_j)} = -gold_i * \delta(i,j) + \sum_{i} {gold_i * exp(y_j)} = -gold_j + exp(y_j) Note: gold_i is a distribution, i.e., \sum_{i} gold_i = 1 if gold is with a one-hot representation (gold_i = 1 for only one dimension), we can reformulize it as dE/dx_j = -\delta(i,j) + exp(y_j) There are two ways to implement this process. Method 1. we compute dE/dy and dy/dx resepectively, and then reach dE/dx by dE/dx = dE/dy * dy/dx (or more precisely dE/dx_j = \sum_{i} {dE/dy_i * dy_i/dx_j}) Method 2. 
we compute dE/dx (or dE/dx_j) in a single step, rather than resorting to the sub-models dE/dy and dy/dx. We can do this by using dE/dx_j = -gold_j + exp(y_j) Here we choose Method 2, i.e., we straightforwardly compute dE/dx_j by dE/dx_j = -gold_j + exp(y_j) (or dE/dx_j = -\delta(i,j) + exp(y_j) for a Maximum A Posteriori Estimation (MAP)) Method 1 is also fine but is more time consuming due to the summation over dimensions. Note that this method is not good for the standard version softmax when working with the cross entropy loss. Because it is numerical unstable. When we use a usual method to define softmax, we have softmax: y_i = log(e^{x_i} / \sum_{k} e^{x_k}). It is trivial to know that dy_i/dx_j = y_i * \delta(i,j) - y_i * y_j. As y_i and y_j could be a small number, y_i * y_i would result in a much smaller one with a risk of lossing precision. This is even worse we multiply dy_i/dx_j with dE/dy_i. So it is in general to use log softmax instead for better numerical stability. >> gold - gold standard to measure error (or loss) >> y - output of the function >> x - input of the function >> dedy - dE/dy >> deds - dE/dx >> lossName - type of loss function, e.g., cross entropy >> leadDim - leading dimension (along which we perform reduction) */ void _CudaLogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x, XTensor * dedy, XTensor * dedx, XTensor * padding, int leadDim, LOSS_FUNCTION_NAME lossName) { leadDim = leadDim < 0 ? y->order - 1 : leadDim; CheckNTErrors((x->devID >= 0), "Backward computation of log softmax must be run on GPUs."); CheckNTErrors((x->devID == y->devID && gold->devID == y->devID), "Tensors used in log softmax are not on the same GPU."); CheckNTErrors((gold != NULL), "No x gold standard is found!"); int leadDimRDI = y->order - leadDim - 1; int dimensionSize = y->dimSizeRDI[leadDimRDI]; int stride = 1; int blockSize = 1; int blockNum = 1; for (int i = 0; i < leadDimRDI; i++) stride *= y->dimSizeRDI[i]; blockSize = stride * dimensionSize; blockNum = y->unitNum / blockSize; int devIDBackup; ProtectCudaDev(x->devID, devIDBackup); if (x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE) { CheckNTErrors((lossName == CROSSENTROPY || lossName == SQUAREDERROR || lossName == NOLOSS), "Unknown loss function."); int cudaGridSize[3], cudaBlockSize[3]; if (lossName == CROSSENTROPY || lossName == SQUAREDERROR) { if (gold->isSparse) { CheckNTErrors((gold->order == 2), "TODO!") CheckNTErrors((leadDim == 0), "TODO!"); GDevs.GetCudaThread(x->devID, x->unitNum, cudaGridSize, cudaBlockSize); /* dE/ds_j = exp(y_j) */ KernelExpLoss <<<dim3(cudaGridSize[0]), dim3(cudaBlockSize[0]) >>> (NULL, (DTYPE*)dedx->data, (DTYPE*)y->data, dimensionSize * stride, lossName); GDevs.GetCudaThread(x->devID, gold->unitNumNonZero, cudaGridSize, cudaBlockSize); /* dE/ds_j += -gold_j */ KernelLogSoftmaxBackwardDEDSSparseByRow <<<dim3(cudaGridSize[0]), dim3(cudaBlockSize[0]) >>> (NULL, (DTYPE*)dedx->data, (char*)gold->data + sizeof(int), (DTYPE*)y->data, (DTYPE*)x->data, dedx->dimSize[0], dedx->dimSize[1], gold->unitNumNonZero, lossName); } else { CheckNTErrors((XTensor::IsSameShaped(gold, y)), "The tensors must be of the same size!"); for (int k = 0; k < blockNum; k++) { GDevs.GetCudaThread(x->devID, blockSize, cudaGridSize, cudaBlockSize); /* change inside here */ /* dE/ds_j = -gold_j + exp(y_j) */ KernelLogSoftmaxBackwardDEDS <<<dim3(cudaGridSize[0]), dim3(cudaBlockSize[0]) >>> (NULL, (DTYPE*)dedx->data + k * blockSize, (DTYPE*)gold->data + k * blockSize, (DTYPE*)y->data + k * blockSize, 
(DTYPE*)x->data + k * blockSize, dimensionSize * stride, lossName); } } if(padding != NULL) { int n = leadDim; int paddingOrder = padding->order; int * paddingDims = new int[paddingOrder]; memcpy(paddingDims, padding->dimSize, padding->order * sizeof(int)); padding->Reshape(padding->unitNum); int order = dedx->order; int * dims = new int[order]; memcpy(dims, dedx->dimSize, dedx->order * sizeof(int)); dedx->Reshape(dedx->unitNum/dedx->GetDim(n), dedx->GetDim(n)); _MultiplyDimMe(dedx, padding, 0); padding->Reshape(paddingOrder, paddingDims); dedx->Reshape(order, dims); delete[] paddingDims; delete[] dims; } } else { ShowNTErrors("TODO!"); } } else{ ShowNTErrors("TODO!"); } BacktoCudaDev(x->devID, devIDBackup); } #endif } // namespace nts(NiuTrans.Tensor)
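A quick way to sanity-check the dE/dx_j = -gold_j + exp(y_j) identity derived above is a finite-difference comparison on the host. The following standalone sketch (plain C++, not part of NiuTrans.Tensor; crossEntropy is a helper defined here, not a library call) compares the analytic gradient against central differences:

#include <cmath>
#include <cstdio>

/* cross entropy of gold against log softmax of x */
static double crossEntropy(const double * x, const double * gold, int n)
{
    double mx = x[0];
    for (int k = 1; k < n; k++) if (x[k] > mx) mx = x[k];
    double sum = 0;
    for (int k = 0; k < n; k++) sum += std::exp(x[k] - mx);
    double e = 0;
    for (int k = 0; k < n; k++) e -= gold[k] * (x[k] - mx - std::log(sum));
    return e;
}

int main()
{
    const int n = 4;
    double x[n]    = {0.5, -1.0, 2.0, 0.0};
    double gold[n] = {0.0,  0.0, 1.0, 0.0};   /* one-hot gold standard */

    /* forward: y_j = x_j - max - log(sum_k exp(x_k - max)) */
    double mx = x[0];
    for (int k = 1; k < n; k++) if (x[k] > mx) mx = x[k];
    double sum = 0;
    for (int k = 0; k < n; k++) sum += std::exp(x[k] - mx);

    for (int j = 0; j < n; j++) {
        double y = x[j] - mx - std::log(sum);       /* log softmax output */
        double analytic = std::exp(y) - gold[j];    /* -gold_j + exp(y_j) */

        /* numeric gradient of E w.r.t. x_j by central differences */
        const double h = 1e-6;
        double xs[n];
        for (int k = 0; k < n; k++) xs[k] = x[k];
        xs[j] = x[j] + h;
        double ep = crossEntropy(xs, gold, n);
        xs[j] = x[j] - h;
        double em = crossEntropy(xs, gold, n);

        printf("j=%d  analytic=%+.6f  numeric=%+.6f\n", j, analytic, (ep - em) / (2 * h));
    }
    return 0;
}

The two columns should agree to roughly six decimal places, which is exactly the "Method 2" shortcut that KernelLogSoftmaxBackwardDEDS implements in a single step.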
0dd17dc0415aa362bd87a4623b7bf90fb03e7695.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
__global__ void LRNFillScale(const int nthreads, const Dtype* const in,
    const int num, const int channels, const int height,
    const int width, const int size, const Dtype alpha_over_size,
    const Dtype k, Dtype* const scale) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // find out the local offset
    const int w = index % width;
    const int h = (index / width) % height;
    const int n = index / width / height;
    const int offset = (n * channels * height + h) * width + w;
    const int step = height * width;
    const Dtype* const in_off = in + offset;
    Dtype* const scale_off = scale + offset;
    int head = 0;
    const int pre_pad = (size - 1) / 2;
    const int post_pad = size - pre_pad - 1;
    Dtype accum_scale = 0;
    // fill the scale at [n, :, h, w]
    // accumulate values
    while (head < post_pad && head < channels) {
      accum_scale += in_off[head * step] * in_off[head * step];
      ++head;
    }
    // both add and subtract
    while (head < channels) {
      accum_scale += in_off[head * step] * in_off[head * step];
      if (head - size >= 0) {
        accum_scale -= in_off[(head - size) * step]
                       * in_off[(head - size) * step];
      }
      scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
      ++head;
    }
    // subtract only
    while (head < channels + post_pad) {
      if (head - size >= 0) {
        accum_scale -= in_off[(head - size) * step]
                       * in_off[(head - size) * step];
      }
      scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
      ++head;
    }
  }
}

template <typename Dtype>
void LRNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  switch (this->layer_param_.lrn_param().norm_region()) {
  case LRNParameter_NormRegion_ACROSS_CHANNELS:
    CrossChannelForward_gpu(bottom, top);
    break;
  case LRNParameter_NormRegion_WITHIN_CHANNEL:
    WithinChannelForward(bottom, top);
    break;
  default:
    LOG(FATAL) << "Unknown normalization region.";
  }
}

// TODO: check if it would be faster to just put it into the previous kernel.
template <typename Dtype>
__global__ void LRNComputeOutput(const int nthreads, const Dtype* const in,
    const Dtype* const scale, const Dtype negative_beta, Dtype* const out) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    out[index] = in[index] * pow(scale[index], negative_beta);
  }
}

template <typename Dtype>
void LRNLayer<Dtype>::CrossChannelForward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // First, compute scale
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  Dtype* scale_data = scale_.mutable_gpu_data();
  // We will launch one kernel for each pixel location, and have the kernel
  // go through all the channels.
  int n_threads = num_ * height_ * width_;
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( LRNFillScale), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      n_threads, bottom_data, num_, channels_, height_, width_, size_,
      alpha_ / size_, k_, scale_data);
  CUDA_POST_KERNEL_CHECK;
  n_threads = bottom[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( LRNComputeOutput), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      n_threads, bottom_data, scale_data, -beta_, top_data);
  CUDA_POST_KERNEL_CHECK;
}
template void LRNLayer<float>::CrossChannelForward_gpu(
    const vector<Blob<float>*>& bottom, const vector<Blob<float>*>& top);
template void LRNLayer<double>::CrossChannelForward_gpu(
    const vector<Blob<double>*>& bottom, const vector<Blob<double>*>& top);

template <typename Dtype>
void LRNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  switch (this->layer_param_.lrn_param().norm_region()) {
  case LRNParameter_NormRegion_ACROSS_CHANNELS:
    CrossChannelBackward_gpu(top, propagate_down, bottom);
    break;
  case LRNParameter_NormRegion_WITHIN_CHANNEL:
    WithinChannelBackward(top, propagate_down, bottom);
    break;
  default:
    LOG(FATAL) << "Unknown normalization region.";
  }
}

template <typename Dtype>
__global__ void LRNComputeDiff(const int nthreads,
    const Dtype* const bottom_data, const Dtype* const top_data,
    const Dtype* const scale, const Dtype* const top_diff,
    const int num, const int channels, const int height,
    const int width, const int size, const Dtype negative_beta,
    const Dtype cache_ratio, Dtype* const bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // find out the local offset
    const int w = index % width;
    const int h = (index / width) % height;
    const int n = index / width / height;
    const int offset = (n * channels * height + h) * width + w;
    const int step = height * width;
    const Dtype* const bottom_off = bottom_data + offset;
    const Dtype* const top_off = top_data + offset;
    const Dtype* const scale_off = scale + offset;
    const Dtype* const top_diff_off = top_diff + offset;
    Dtype* const bottom_diff_off = bottom_diff + offset;
    int head = 0;
    const int pre_pad = size - (size + 1) / 2;
    const int post_pad = size - pre_pad - 1;
    Dtype accum_ratio = 0;
    // accumulate values
    while (head < post_pad && head < channels) {
      accum_ratio += top_diff_off[head * step] * top_off[head * step] /
          scale_off[head * step];
      ++head;
    }
    // both add and subtract
    while (head < channels) {
      accum_ratio += top_diff_off[head * step] * top_off[head * step] /
          scale_off[head * step];
      if (head - size >= 0) {
        accum_ratio -= top_diff_off[(head - size) * step] *
            top_off[(head - size) * step] / scale_off[(head - size) * step];
      }
      bottom_diff_off[(head - post_pad) * step] =
          top_diff_off[(head - post_pad) * step]
            * pow(scale_off[(head - post_pad) * step], negative_beta)
          - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;
      ++head;
    }
    // subtract only
    while (head < channels + post_pad) {
      if (head - size >= 0) {
        accum_ratio -= top_diff_off[(head - size) * step] *
            top_off[(head - size) * step] / scale_off[(head - size) * step];
      }
      bottom_diff_off[(head - post_pad) * step] =
          top_diff_off[(head - post_pad) * step]
            * pow(scale_off[(head - post_pad) * step], negative_beta)
          - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;
      ++head;
    }
  }
}

template <typename Dtype>
void LRNLayer<Dtype>::CrossChannelBackward_gpu(
    const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  int n_threads = num_ * height_ * width_;
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( LRNComputeDiff), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      n_threads, bottom[0]->gpu_data(), top[0]->gpu_data(),
      scale_.gpu_data(), top[0]->gpu_diff(), num_, channels_, height_, width_,
      size_, -beta_, Dtype(2. * alpha_ * beta_ / size_),
      bottom[0]->mutable_gpu_diff());
}
template void LRNLayer<float>::CrossChannelBackward_gpu(
    const vector<Blob<float>*>& top, const vector<bool>& propagate_down,
    const vector<Blob<float>*>& bottom);
template void LRNLayer<double>::CrossChannelBackward_gpu(
    const vector<Blob<double>*>& top, const vector<bool>& propagate_down,
    const vector<Blob<double>*>& bottom);

INSTANTIATE_LAYER_GPU_FUNCS(LRNLayer);

}  // namespace caffe
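For reference, the sliding-window scale term that LRNFillScale computes can be checked against a direct CPU evaluation. This standalone sketch (plain C++, not part of Caffe; all values are made up) evaluates scale[c] = k + (alpha/size) * sum of squares over the clipped channel window for one pixel:

#include <algorithm>
#include <cstdio>
#include <vector>

int main()
{
    const int channels = 6, size = 5;
    const float k = 1.0f, alpha = 1e-4f;
    std::vector<float> in = {1, 2, 3, 4, 5, 6};   // one pixel, across channels
    const int pre_pad = (size - 1) / 2;           // same definition as LRNFillScale

    for (int c = 0; c < channels; c++) {
        // window of `size` channels centered on c, clipped at both ends
        int lo = std::max(0, c - pre_pad);
        int hi = std::min(channels, c - pre_pad + size);
        float acc = 0;
        for (int cc = lo; cc < hi; cc++) acc += in[cc] * in[cc];
        printf("scale[%d] = %f\n", c, k + acc * alpha / size);
    }
    return 0;
}

The kernel obtains the same numbers with a single running sum per pixel: it adds the entering channel's square and subtracts the leaving one, so each channel is touched O(1) times instead of O(size).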
0dd17dc0415aa362bd87a4623b7bf90fb03e7695.cu
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
__global__ void LRNFillScale(const int nthreads, const Dtype* const in,
    const int num, const int channels, const int height,
    const int width, const int size, const Dtype alpha_over_size,
    const Dtype k, Dtype* const scale) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // find out the local offset
    const int w = index % width;
    const int h = (index / width) % height;
    const int n = index / width / height;
    const int offset = (n * channels * height + h) * width + w;
    const int step = height * width;
    const Dtype* const in_off = in + offset;
    Dtype* const scale_off = scale + offset;
    int head = 0;
    const int pre_pad = (size - 1) / 2;
    const int post_pad = size - pre_pad - 1;
    Dtype accum_scale = 0;
    // fill the scale at [n, :, h, w]
    // accumulate values
    while (head < post_pad && head < channels) {
      accum_scale += in_off[head * step] * in_off[head * step];
      ++head;
    }
    // both add and subtract
    while (head < channels) {
      accum_scale += in_off[head * step] * in_off[head * step];
      if (head - size >= 0) {
        accum_scale -= in_off[(head - size) * step]
                       * in_off[(head - size) * step];
      }
      scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
      ++head;
    }
    // subtract only
    while (head < channels + post_pad) {
      if (head - size >= 0) {
        accum_scale -= in_off[(head - size) * step]
                       * in_off[(head - size) * step];
      }
      scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
      ++head;
    }
  }
}

template <typename Dtype>
void LRNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  switch (this->layer_param_.lrn_param().norm_region()) {
  case LRNParameter_NormRegion_ACROSS_CHANNELS:
    CrossChannelForward_gpu(bottom, top);
    break;
  case LRNParameter_NormRegion_WITHIN_CHANNEL:
    WithinChannelForward(bottom, top);
    break;
  default:
    LOG(FATAL) << "Unknown normalization region.";
  }
}

// TODO: check if it would be faster to just put it into the previous kernel.
template <typename Dtype>
__global__ void LRNComputeOutput(const int nthreads, const Dtype* const in,
    const Dtype* const scale, const Dtype negative_beta, Dtype* const out) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    out[index] = in[index] * pow(scale[index], negative_beta);
  }
}

template <typename Dtype>
void LRNLayer<Dtype>::CrossChannelForward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // First, compute scale
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  Dtype* scale_data = scale_.mutable_gpu_data();
  // We will launch one kernel for each pixel location, and have the kernel
  // go through all the channels.
  int n_threads = num_ * height_ * width_;
  // NOLINT_NEXT_LINE(whitespace/operators)
  LRNFillScale<<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>(
      n_threads, bottom_data, num_, channels_, height_, width_, size_,
      alpha_ / size_, k_, scale_data);
  CUDA_POST_KERNEL_CHECK;
  n_threads = bottom[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  LRNComputeOutput<<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>(
      n_threads, bottom_data, scale_data, -beta_, top_data);
  CUDA_POST_KERNEL_CHECK;
}
template void LRNLayer<float>::CrossChannelForward_gpu(
    const vector<Blob<float>*>& bottom, const vector<Blob<float>*>& top);
template void LRNLayer<double>::CrossChannelForward_gpu(
    const vector<Blob<double>*>& bottom, const vector<Blob<double>*>& top);

template <typename Dtype>
void LRNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  switch (this->layer_param_.lrn_param().norm_region()) {
  case LRNParameter_NormRegion_ACROSS_CHANNELS:
    CrossChannelBackward_gpu(top, propagate_down, bottom);
    break;
  case LRNParameter_NormRegion_WITHIN_CHANNEL:
    WithinChannelBackward(top, propagate_down, bottom);
    break;
  default:
    LOG(FATAL) << "Unknown normalization region.";
  }
}

template <typename Dtype>
__global__ void LRNComputeDiff(const int nthreads,
    const Dtype* const bottom_data, const Dtype* const top_data,
    const Dtype* const scale, const Dtype* const top_diff,
    const int num, const int channels, const int height,
    const int width, const int size, const Dtype negative_beta,
    const Dtype cache_ratio, Dtype* const bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // find out the local offset
    const int w = index % width;
    const int h = (index / width) % height;
    const int n = index / width / height;
    const int offset = (n * channels * height + h) * width + w;
    const int step = height * width;
    const Dtype* const bottom_off = bottom_data + offset;
    const Dtype* const top_off = top_data + offset;
    const Dtype* const scale_off = scale + offset;
    const Dtype* const top_diff_off = top_diff + offset;
    Dtype* const bottom_diff_off = bottom_diff + offset;
    int head = 0;
    const int pre_pad = size - (size + 1) / 2;
    const int post_pad = size - pre_pad - 1;
    Dtype accum_ratio = 0;
    // accumulate values
    while (head < post_pad && head < channels) {
      accum_ratio += top_diff_off[head * step] * top_off[head * step] /
          scale_off[head * step];
      ++head;
    }
    // both add and subtract
    while (head < channels) {
      accum_ratio += top_diff_off[head * step] * top_off[head * step] /
          scale_off[head * step];
      if (head - size >= 0) {
        accum_ratio -= top_diff_off[(head - size) * step] *
            top_off[(head - size) * step] / scale_off[(head - size) * step];
      }
      bottom_diff_off[(head - post_pad) * step] =
          top_diff_off[(head - post_pad) * step]
            * pow(scale_off[(head - post_pad) * step], negative_beta)
          - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;
      ++head;
    }
    // subtract only
    while (head < channels + post_pad) {
      if (head - size >= 0) {
        accum_ratio -= top_diff_off[(head - size) * step] *
            top_off[(head - size) * step] / scale_off[(head - size) * step];
      }
      bottom_diff_off[(head - post_pad) * step] =
          top_diff_off[(head - post_pad) * step]
            * pow(scale_off[(head - post_pad) * step], negative_beta)
          - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;
      ++head;
    }
  }
}

template <typename Dtype>
void LRNLayer<Dtype>::CrossChannelBackward_gpu(
    const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  int n_threads = num_ * height_ * width_;
  // NOLINT_NEXT_LINE(whitespace/operators)
  LRNComputeDiff<<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>(
      n_threads, bottom[0]->gpu_data(), top[0]->gpu_data(),
      scale_.gpu_data(), top[0]->gpu_diff(), num_, channels_, height_, width_,
      size_, -beta_, Dtype(2. * alpha_ * beta_ / size_),
      bottom[0]->mutable_gpu_diff());
}
template void LRNLayer<float>::CrossChannelBackward_gpu(
    const vector<Blob<float>*>& top, const vector<bool>& propagate_down,
    const vector<Blob<float>*>& bottom);
template void LRNLayer<double>::CrossChannelBackward_gpu(
    const vector<Blob<double>*>& top, const vector<bool>& propagate_down,
    const vector<Blob<double>*>& bottom);

INSTANTIATE_LAYER_GPU_FUNCS(LRNLayer);

}  // namespace caffe
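The cache_ratio = 2 * alpha * beta / size constant passed to LRNComputeDiff, and its running accum_ratio sum of top_diff * top / scale, follow from differentiating top_c = in_c * scale_c^(-beta). A standalone numeric check (plain C++, not part of Caffe; all values are made up) of the per-pixel formula the kernel implements:

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

int main()
{
    const int C = 6, size = 5;
    const double k = 2.0, alpha = 1e-2, beta = 0.75;
    std::vector<double> in      = {0.3, -1.2, 0.7, 2.0, -0.4, 1.1};
    std::vector<double> topDiff = {1, -2, 0.5, 0.25, 3, -1};   // arbitrary upstream grads
    const int prePad = (size - 1) / 2;

    // forward pass: scale[c] and top[c] for one pixel across channels
    auto forward = [&](const std::vector<double> &x, std::vector<double> &scale,
                       std::vector<double> &top) {
        for (int c = 0; c < C; c++) {
            int lo = std::max(0, c - prePad), hi = std::min(C, c - prePad + size);
            double acc = 0;
            for (int cc = lo; cc < hi; cc++) acc += x[cc] * x[cc];
            scale[c] = k + acc * alpha / size;
            top[c] = x[c] * std::pow(scale[c], -beta);
        }
    };

    std::vector<double> scale(C), top(C);
    forward(in, scale, top);

    for (int j = 0; j < C; j++) {
        // analytic gradient, following LRNComputeDiff
        double accum = 0;
        for (int c = 0; c < C; c++) {
            int lo = std::max(0, c - prePad), hi = std::min(C, c - prePad + size);
            if (j >= lo && j < hi) accum += topDiff[c] * top[c] / scale[c];
        }
        double analytic = topDiff[j] * std::pow(scale[j], -beta)
                          - (2 * alpha * beta / size) * in[j] * accum;

        // numeric gradient of E = sum_c topDiff[c] * top[c] by central differences
        const double h = 1e-6;
        std::vector<double> xp = in, xm = in, s(C), t(C);
        xp[j] += h; xm[j] -= h;
        double ep = 0, em = 0;
        forward(xp, s, t); for (int c = 0; c < C; c++) ep += topDiff[c] * t[c];
        forward(xm, s, t); for (int c = 0; c < C; c++) em += topDiff[c] * t[c];

        printf("j=%d  analytic=%+.6f  numeric=%+.6f\n", j, analytic, (ep - em) / (2 * h));
    }
    return 0;
}

The kernel evaluates the same accum sum incrementally with the add/subtract sliding window, so the backward pass stays O(channels) per pixel just like the forward pass.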
44f378c73a2a386464988198d201cfe197bbab02.hip
// !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include "../common/stopwatch.h"

__device__ bool prime = 1;

__global__ void primeGPU(int N)
{
    if (N == 1) { prime = 0; return; }
    if (N == 2 || N == 3) { return; }
    const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i > 1 && N % i == 0 && i <= sqrt((float)N)) { prime = 0; return; }
}

__global__ void primeGPUUnroll2(int N)
{
    if (N == 1) { prime = 0; return; }
    if (N == 2 || N == 3) { return; }
    const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int j = i + blockDim.x;
    if (i > 1 && N % i == 0 && i <= sqrt((float)N)) { prime = 0; return; }
    if (j > 1 && N % j == 0 && j <= sqrt((float)N)) { prime = 0; return; }
}

__global__ void primeGPUUnroll4(int N)
{
    if (N == 1) { prime = 0; return; }
    if (N == 2 || N == 3) { return; }
    const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int j = i + blockDim.x;
    const unsigned int k = j + blockDim.x;
    const unsigned int l = k + blockDim.x;
    if (i > 1 && N % i == 0 && i <= sqrt((float)N)) { prime = 0; return; }
    if (j > 1 && N % j == 0 && j <= sqrt((float)N)) { prime = 0; return; }
    if (k > 1 && N % k == 0 && k <= sqrt((float)N)) { prime = 0; return; }
    if (l > 1 && N % l == 0 && l <= sqrt((float)N)) { prime = 0; return; }
}

bool primeCPU(int N)
{
    if (N == 1) return 0;
    if (N == 2) return 1;
    if (N == 3) return 1;
    if (N % 2 == 0) return 0;
    for (int i = 3; i <= sqrt(N); i++) {
        if (N % i == 0) return 0;
    }
    return 1;
}

int main(int argc, char **argv)
{
    // set up device
    int dev = 0;
    hipDeviceProp_t deviceProp;
    CHECK(hipGetDeviceProperties(&deviceProp, dev));
    printf("%s starting main at ", argv[0]);
    printf("device %d: %s \n", dev, deviceProp.name);
    CHECK(hipSetDevice(dev));

    int B = 512;
    int N = 831595;
    if (argc > 1) B = atoi(argv[1]);
    if (argc > 2) N = atoi(argv[2]);

    Stopwatch s;
    bool tmp = primeCPU(N);
    printf("\nprimeCPU %f sec \n", s.elapsed());

    dim3 block(B);
    dim3 grid((N + block.x - 1) / block.x);

    s.reset();
    hipLaunchKernelGGL((primeGPU), dim3(grid), dim3(block), 0, 0, N);
    CHECK(hipDeviceSynchronize());
    CHECK(hipGetLastError());
    printf("primeGPU<<<%d,%d>>> elapsed %f sec \n", grid.x, block.x, s.elapsed());

    bool tr = 1;
    bool pr = 0;
    CHECK(hipMemcpyFromSymbol(&pr, prime, sizeof(bool)));
    if (tmp != pr) printf("Bad\n");
    else printf("Good\n");
    hipMemcpyToSymbol(prime, &tr, sizeof(bool));

    s.reset();
    hipLaunchKernelGGL((primeGPUUnroll2), dim3(grid.x / 2), dim3(block), 0, 0, N);
    CHECK(hipDeviceSynchronize());
    CHECK(hipGetLastError());
    printf("primeGPUUnroll2<<<%d,%d>>> elapsed %f sec \n", grid.x / 2, block.x, s.elapsed());

    CHECK(hipMemcpyFromSymbol(&pr, prime, sizeof(bool)));
    if (tmp != pr) printf("Bad\n");
    else printf("Good\n");
    hipMemcpyToSymbol(prime, &tr, sizeof(bool));

    s.reset();
    hipLaunchKernelGGL((primeGPUUnroll4), dim3(grid.x / 4), dim3(block), 0, 0, N);
    CHECK(hipDeviceSynchronize());
    CHECK(hipGetLastError());
    // report the quarter-size grid that was actually launched (was grid.x / 2)
    printf("primeGPUUnroll4<<<%d,%d>>> elapsed %f sec \n", grid.x / 4, block.x, s.elapsed());

    CHECK(hipMemcpyFromSymbol(&pr, prime, sizeof(bool)));
    if (tmp != pr) printf("Bad\n");
    else printf("Good\n");
    hipMemcpyToSymbol(prime, &tr, sizeof(bool));

    /*
    for (int i = 9999000; i < 10000000; i++) {
        bool tmp = primeCPU(i);
        dim3 block(B);
        dim3 grid((i + block.x * 2 - 1) / (block.x * 2));
        hipLaunchKernelGGL((primeGPUUnroll4), dim3(grid.x / 4), dim3(block), 0, 0, i);
        CHECK(hipDeviceSynchronize());
        CHECK(hipGetLastError());
        pr = 0;
        CHECK(hipMemcpyFromSymbol(&pr, prime, sizeof(bool)));
        if (tmp != pr) printf("Bad\n %d \n", i);
        hipMemcpyToSymbol(prime, &tr, sizeof(bool));
    }
    */

    CHECK(hipDeviceReset());
    return EXIT_SUCCESS;
}

/*
a starting main at device 0: GeForce GTX 1050

primeCPU 0.000003 sec
==8896== Some kernel(s) will be replayed on device 0 in order to collect all events/metrics.
==8896== primeGPU<<<163,512>>> elapsed 0.285190 sec
Replaying kernel "primeGPU(int)" (done)
Good
primeGPUUnroll2<<<81,512>>> elapsed 0.056494 sec
==8896== Replaying kernel "primeGPUUnroll2(int)" (done)
Good
==8896== primeGPUUnroll4<<<81,512>>> elapsed 0.057359 sec
Replaying kernel "primeGPUUnroll4(int)" (done)
Good
==8896== Profiling application: a
==8896== Profiling result:
==8896== Metric result:
Invocations  Metric Name  Metric Description  Min  Max  Avg
Device "GeForce GTX 1050 (0)"
    Kernel: primeGPUUnroll4(int)
          1  dram_read_throughput                   Device Memory Read Throughput                 752.26MB/s  752.26MB/s  752.26MB/s
          1  dram_write_throughput                  Device Memory Write Throughput                77.653MB/s  77.653MB/s  77.653MB/s
          1  shared_load_transactions_per_request   Shared Memory Load Transactions Per Request   0.000000    0.000000    0.000000
          1  shared_store_transactions_per_request  Shared Memory Store Transactions Per Request  0.000000    0.000000    0.000000
    Kernel: primeGPUUnroll2(int)
          1  dram_read_throughput                   Device Memory Read Throughput                 377.32MB/s  377.32MB/s  377.32MB/s
          1  dram_write_throughput                  Device Memory Write Throughput                49.757MB/s  49.757MB/s  49.757MB/s
          1  shared_load_transactions_per_request   Shared Memory Load Transactions Per Request   0.000000    0.000000    0.000000
          1  shared_store_transactions_per_request  Shared Memory Store Transactions Per Request  0.000000    0.000000    0.000000
    Kernel: primeGPU(int)
          1  dram_read_throughput                   Device Memory Read Throughput                 28.435MB/s  28.435MB/s  28.435MB/s
          1  dram_write_throughput                  Device Memory Write Throughput                14.217MB/s  14.217MB/s  14.217MB/s
          1  shared_load_transactions_per_request   Shared Memory Load Transactions Per Request   0.000000    0.000000    0.000000
          1  shared_store_transactions_per_request  Shared Memory Store Transactions Per Request  0.000000    0.000000    0.000000

a starting main at device 0: GeForce GTX 1050

primeCPU 0.000003 sec
==7680== Some kernel(s) will be replayed on device 0 in order to collect all events/metrics.
==7680== primeGPU<<<2599,32>>> elapsed 0.264729 sec
Replaying kernel "primeGPU(int)" (done)
Good
==7680== primeGPUUnroll2<<<1299,32>>> elapsed 0.057661 sec
Replaying kernel "primeGPUUnroll2(int)" (done)
Good
primeGPUUnroll4<<<1299,32>>> elapsed 0.058291 sec
==7680== Replaying kernel "primeGPUUnroll4(int)" (done)
Good
==7680== Profiling application: a 32
==7680== Profiling result:
==7680== Metric result:
Invocations  Metric Name  Metric Description  Min  Max  Avg
Device "GeForce GTX 1050 (0)"
    Kernel: primeGPUUnroll4(int)
          1  dram_read_throughput                   Device Memory Read Throughput                 443.02MB/s  443.02MB/s  443.02MB/s
          1  dram_write_throughput                  Device Memory Write Throughput                67.636MB/s  67.636MB/s  67.636MB/s
          1  shared_load_transactions_per_request   Shared Memory Load Transactions Per Request   0.000000    0.000000    0.000000
          1  shared_store_transactions_per_request  Shared Memory Store Transactions Per Request  0.000000    0.000000    0.000000
    Kernel: primeGPUUnroll2(int)
          1  dram_read_throughput                   Device Memory Read Throughput                 263.91MB/s  263.91MB/s  263.91MB/s
          1  dram_write_throughput                  Device Memory Write Throughput                42.652MB/s  42.652MB/s  42.652MB/s
          1  shared_load_transactions_per_request   Shared Memory Load Transactions Per Request   0.000000    0.000000    0.000000
          1  shared_store_transactions_per_request  Shared Memory Store Transactions Per Request  0.000000    0.000000    0.000000
    Kernel: primeGPU(int)
          1  dram_read_throughput                   Device Memory Read Throughput                 28.575MB/s  28.575MB/s  28.574MB/s
          1  dram_write_throughput                  Device Memory Write Throughput                7.1436MB/s  7.1436MB/s  7.1436MB/s
          1  shared_load_transactions_per_request   Shared Memory Load Transactions Per Request   0.000000    0.000000    0.000000
          1  shared_store_transactions_per_request  Shared Memory Store Transactions Per Request  0.000000    0.000000    0.000000

a starting main at device 0: GeForce GTX 1050

primeCPU 0.000003 sec
==4824== Some kernel(s) will be replayed on device 0 in order to collect all events/metrics.
==4824== primeGPU<<<325,256>>> elapsed 0.267788 sec
Replaying kernel "primeGPU(int)" (done)
Good
==4824== primeGPUUnroll2<<<162,256>>> elapsed 0.056880 sec
Replaying kernel "primeGPUUnroll2(int)" (done)
Good
==4824== primeGPUUnroll4<<<162,256>>> elapsed 0.059133 sec
Replaying kernel "primeGPUUnroll4(int)" (done)
Good
==4824== Profiling application: a 256
==4824== Profiling result:
==4824== Metric result:
Invocations  Metric Name  Metric Description  Min  Max  Avg
Device "GeForce GTX 1050 (0)"
    Kernel: primeGPUUnroll4(int)
          1  dram_read_throughput                   Device Memory Read Throughput                 506.22MB/s  506.22MB/s  506.22MB/s
          1  dram_write_throughput                  Device Memory Write Throughput                54.237MB/s  54.237MB/s  54.237MB/s
          1  shared_load_transactions_per_request   Shared Memory Load Transactions Per Request   0.000000    0.000000    0.000000
          1  shared_store_transactions_per_request  Shared Memory Store Transactions Per Request  0.000000    0.000000    0.000000
    Kernel: primeGPUUnroll2(int)
          1  dram_read_throughput                   Device Memory Read Throughput                 413.46MB/s  413.46MB/s  413.46MB/s
          1  dram_write_throughput                  Device Memory Write Throughput                0.00000B/s  0.00000B/s  0.00000B/s
          1  shared_load_transactions_per_request   Shared Memory Load Transactions Per Request   0.000000    0.000000    0.000000
          1  shared_store_transactions_per_request  Shared Memory Store Transactions Per Request  0.000000    0.000000    0.000000
    Kernel: primeGPU(int)
          1  dram_read_throughput                   Device Memory Read Throughput                 291.45MB/s  291.45MB/s  291.45MB/s
          1  dram_write_throughput                  Device Memory Write Throughput                36.893MB/s  36.893MB/s  36.893MB/s
          1  shared_load_transactions_per_request   Shared Memory Load Transactions Per Request   0.000000    0.000000    0.000000
          1  shared_store_transactions_per_request  Shared Memory Store Transactions Per Request  0.000000    0.000000    0.000000
*/
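Note that these unrolled kernels keep i = blockIdx.x * blockDim.x + threadIdx.x while launching grid.x/2 (or grid.x/4) blocks, so neighbouring blocks overlap and only roughly half (or a quarter) of [0, N) is actually reached; the results stay correct here because any composite N has a divisor no larger than sqrt(N), which lies well inside the covered range. The conventional unroll-2 indexing doubles the per-block stride instead, as in this standalone sketch (not from the file above; the prime flag is redeclared so the sketch compiles on its own, and it would be launched as primeGPUUnroll2Full<<<grid.x/2, block>>>(N)):

#include <hip/hip_runtime.h>
#include <math.h>

// standalone copy of the file's __device__ flag so this sketch compiles by itself
__device__ bool primeFlag = 1;

__global__ void primeGPUUnroll2Full(int N)
{
    // each block starts at blockIdx.x * blockDim.x * 2,
    // so grid.x/2 blocks cover the whole range [0, N) without overlap
    const unsigned int base = blockIdx.x * blockDim.x * 2 + threadIdx.x;
    const unsigned int i = base;                 // first candidate divisor
    const unsigned int j = base + blockDim.x;    // second candidate divisor
    if (i > 1 && i <= sqrtf((float)N) && N % i == 0) primeFlag = 0;
    if (j > 1 && j <= sqrtf((float)N) && N % j == 0) primeFlag = 0;
}

With this indexing the halved grid does the same total work as the original primeGPU launch, rather than re-testing overlapping index windows.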
44f378c73a2a386464988198d201cfe197bbab02.cu
#include "../common/common.h"
#include <stdio.h>
#include <cuda_runtime.h>
#include <stdlib.h>
#include "../common/stopwatch.h"

__device__ bool prime = 1;

__global__ void primeGPU(int N)
{
    if (N == 1) { prime = 0; return; }
    if (N == 2 || N == 3) { return; }
    const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i > 1 && N % i == 0 && i <= sqrt((float)N)) { prime = 0; return; }
}

__global__ void primeGPUUnroll2(int N)
{
    if (N == 1) { prime = 0; return; }
    if (N == 2 || N == 3) { return; }
    const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int j = i + blockDim.x;
    if (i > 1 && N % i == 0 && i <= sqrt((float)N)) { prime = 0; return; }
    if (j > 1 && N % j == 0 && j <= sqrt((float)N)) { prime = 0; return; }
}

__global__ void primeGPUUnroll4(int N)
{
    if (N == 1) { prime = 0; return; }
    if (N == 2 || N == 3) { return; }
    const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int j = i + blockDim.x;
    const unsigned int k = j + blockDim.x;
    const unsigned int l = k + blockDim.x;
    if (i > 1 && N % i == 0 && i <= sqrt((float)N)) { prime = 0; return; }
    if (j > 1 && N % j == 0 && j <= sqrt((float)N)) { prime = 0; return; }
    if (k > 1 && N % k == 0 && k <= sqrt((float)N)) { prime = 0; return; }
    if (l > 1 && N % l == 0 && l <= sqrt((float)N)) { prime = 0; return; }
}

bool primeCPU(int N)
{
    if (N == 1) return 0;
    if (N == 2) return 1;
    if (N == 3) return 1;
    if (N % 2 == 0) return 0;
    for (int i = 3; i <= sqrt(N); i++) {
        if (N % i == 0) return 0;
    }
    return 1;
}

int main(int argc, char **argv)
{
    // set up device
    int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("%s starting main at ", argv[0]);
    printf("device %d: %s \n", dev, deviceProp.name);
    CHECK(cudaSetDevice(dev));

    int B = 512;
    int N = 831595;
    if (argc > 1) B = atoi(argv[1]);
    if (argc > 2) N = atoi(argv[2]);

    Stopwatch s;
    bool tmp = primeCPU(N);
    printf("\nprimeCPU %f sec \n", s.elapsed());

    dim3 block(B);
    dim3 grid((N + block.x - 1) / block.x);

    s.reset();
    primeGPU<<<grid, block>>>(N);
    CHECK(cudaDeviceSynchronize());
    CHECK(cudaGetLastError());
    printf("primeGPU<<<%d,%d>>> elapsed %f sec \n", grid.x, block.x, s.elapsed());

    bool tr = 1;
    bool pr = 0;
    CHECK(cudaMemcpyFromSymbol(&pr, prime, sizeof(bool)));
    if (tmp != pr) printf("Bad\n");
    else printf("Good\n");
    cudaMemcpyToSymbol(prime, &tr, sizeof(bool));

    s.reset();
    primeGPUUnroll2<<<grid.x / 2, block>>>(N);
    CHECK(cudaDeviceSynchronize());
    CHECK(cudaGetLastError());
    printf("primeGPUUnroll2<<<%d,%d>>> elapsed %f sec \n", grid.x / 2, block.x, s.elapsed());

    CHECK(cudaMemcpyFromSymbol(&pr, prime, sizeof(bool)));
    if (tmp != pr) printf("Bad\n");
    else printf("Good\n");
    cudaMemcpyToSymbol(prime, &tr, sizeof(bool));

    s.reset();
    primeGPUUnroll4<<<grid.x / 4, block>>>(N);
    CHECK(cudaDeviceSynchronize());
    CHECK(cudaGetLastError());
    // report the quarter-size grid that was actually launched (was grid.x / 2)
    printf("primeGPUUnroll4<<<%d,%d>>> elapsed %f sec \n", grid.x / 4, block.x, s.elapsed());

    CHECK(cudaMemcpyFromSymbol(&pr, prime, sizeof(bool)));
    if (tmp != pr) printf("Bad\n");
    else printf("Good\n");
    cudaMemcpyToSymbol(prime, &tr, sizeof(bool));

    /*
    for (int i = 9999000; i < 10000000; i++) {
        bool tmp = primeCPU(i);
        dim3 block(B);
        dim3 grid((i + block.x * 2 - 1) / (block.x * 2));
        primeGPUUnroll4<<<grid.x / 4, block>>>(i);
        CHECK(cudaDeviceSynchronize());
        CHECK(cudaGetLastError());
        pr = 0;
        CHECK(cudaMemcpyFromSymbol(&pr, prime, sizeof(bool)));
        if (tmp != pr) printf("Bad\n %d \n", i);
        cudaMemcpyToSymbol(prime, &tr, sizeof(bool));
    }
    */

    CHECK(cudaDeviceReset());
    return EXIT_SUCCESS;
}

/*
a starting main at device 0: GeForce GTX 1050

primeCPU 0.000003 sec
==8896== Some kernel(s) will be replayed on device 0 in order to collect all events/metrics.
==8896== primeGPU<<<163,512>>> elapsed 0.285190 sec
Replaying kernel "primeGPU(int)" (done)
Good
primeGPUUnroll2<<<81,512>>> elapsed 0.056494 sec
==8896== Replaying kernel "primeGPUUnroll2(int)" (done)
Good
==8896== primeGPUUnroll4<<<81,512>>> elapsed 0.057359 sec
Replaying kernel "primeGPUUnroll4(int)" (done)
Good
==8896== Profiling application: a
==8896== Profiling result:
==8896== Metric result:
Invocations  Metric Name  Metric Description  Min  Max  Avg
Device "GeForce GTX 1050 (0)"
    Kernel: primeGPUUnroll4(int)
          1  dram_read_throughput                   Device Memory Read Throughput                 752.26MB/s  752.26MB/s  752.26MB/s
          1  dram_write_throughput                  Device Memory Write Throughput                77.653MB/s  77.653MB/s  77.653MB/s
          1  shared_load_transactions_per_request   Shared Memory Load Transactions Per Request   0.000000    0.000000    0.000000
          1  shared_store_transactions_per_request  Shared Memory Store Transactions Per Request  0.000000    0.000000    0.000000
    Kernel: primeGPUUnroll2(int)
          1  dram_read_throughput                   Device Memory Read Throughput                 377.32MB/s  377.32MB/s  377.32MB/s
          1  dram_write_throughput                  Device Memory Write Throughput                49.757MB/s  49.757MB/s  49.757MB/s
          1  shared_load_transactions_per_request   Shared Memory Load Transactions Per Request   0.000000    0.000000    0.000000
          1  shared_store_transactions_per_request  Shared Memory Store Transactions Per Request  0.000000    0.000000    0.000000
    Kernel: primeGPU(int)
          1  dram_read_throughput                   Device Memory Read Throughput                 28.435MB/s  28.435MB/s  28.435MB/s
          1  dram_write_throughput                  Device Memory Write Throughput                14.217MB/s  14.217MB/s  14.217MB/s
          1  shared_load_transactions_per_request   Shared Memory Load Transactions Per Request   0.000000    0.000000    0.000000
          1  shared_store_transactions_per_request  Shared Memory Store Transactions Per Request  0.000000    0.000000    0.000000

a starting main at device 0: GeForce GTX 1050

primeCPU 0.000003 sec
==7680== Some kernel(s) will be replayed on device 0 in order to collect all events/metrics.
==7680== primeGPU<<<2599,32>>> elapsed 0.264729 sec
Replaying kernel "primeGPU(int)" (done)
Good
==7680== primeGPUUnroll2<<<1299,32>>> elapsed 0.057661 sec
Replaying kernel "primeGPUUnroll2(int)" (done)
Good
primeGPUUnroll4<<<1299,32>>> elapsed 0.058291 sec
==7680== Replaying kernel "primeGPUUnroll4(int)" (done)
Good
==7680== Profiling application: a 32
==7680== Profiling result:
==7680== Metric result:
Invocations  Metric Name  Metric Description  Min  Max  Avg
Device "GeForce GTX 1050 (0)"
    Kernel: primeGPUUnroll4(int)
          1  dram_read_throughput                   Device Memory Read Throughput                 443.02MB/s  443.02MB/s  443.02MB/s
          1  dram_write_throughput                  Device Memory Write Throughput                67.636MB/s  67.636MB/s  67.636MB/s
          1  shared_load_transactions_per_request   Shared Memory Load Transactions Per Request   0.000000    0.000000    0.000000
          1  shared_store_transactions_per_request  Shared Memory Store Transactions Per Request  0.000000    0.000000    0.000000
    Kernel: primeGPUUnroll2(int)
          1  dram_read_throughput                   Device Memory Read Throughput                 263.91MB/s  263.91MB/s  263.91MB/s
          1  dram_write_throughput                  Device Memory Write Throughput                42.652MB/s  42.652MB/s  42.652MB/s
          1  shared_load_transactions_per_request   Shared Memory Load Transactions Per Request   0.000000    0.000000    0.000000
          1  shared_store_transactions_per_request  Shared Memory Store Transactions Per Request  0.000000    0.000000    0.000000
    Kernel: primeGPU(int)
          1  dram_read_throughput                   Device Memory Read Throughput                 28.575MB/s  28.575MB/s  28.574MB/s
          1  dram_write_throughput                  Device Memory Write Throughput                7.1436MB/s  7.1436MB/s  7.1436MB/s
          1  shared_load_transactions_per_request   Shared Memory Load Transactions Per Request   0.000000    0.000000    0.000000
          1  shared_store_transactions_per_request  Shared Memory Store Transactions Per Request  0.000000    0.000000    0.000000

a starting main at device 0: GeForce GTX 1050

primeCPU 0.000003 sec
==4824== Some kernel(s) will be replayed on device 0 in order to collect all events/metrics.
==4824== primeGPU<<<325,256>>> elapsed 0.267788 sec
Replaying kernel "primeGPU(int)" (done)
Good
==4824== primeGPUUnroll2<<<162,256>>> elapsed 0.056880 sec
Replaying kernel "primeGPUUnroll2(int)" (done)
Good
==4824== primeGPUUnroll4<<<162,256>>> elapsed 0.059133 sec
Replaying kernel "primeGPUUnroll4(int)" (done)
Good
==4824== Profiling application: a 256
==4824== Profiling result:
==4824== Metric result:
Invocations  Metric Name  Metric Description  Min  Max  Avg
Device "GeForce GTX 1050 (0)"
    Kernel: primeGPUUnroll4(int)
          1  dram_read_throughput                   Device Memory Read Throughput                 506.22MB/s  506.22MB/s  506.22MB/s
          1  dram_write_throughput                  Device Memory Write Throughput                54.237MB/s  54.237MB/s  54.237MB/s
          1  shared_load_transactions_per_request   Shared Memory Load Transactions Per Request   0.000000    0.000000    0.000000
          1  shared_store_transactions_per_request  Shared Memory Store Transactions Per Request  0.000000    0.000000    0.000000
    Kernel: primeGPUUnroll2(int)
          1  dram_read_throughput                   Device Memory Read Throughput                 413.46MB/s  413.46MB/s  413.46MB/s
          1  dram_write_throughput                  Device Memory Write Throughput                0.00000B/s  0.00000B/s  0.00000B/s
          1  shared_load_transactions_per_request   Shared Memory Load Transactions Per Request   0.000000    0.000000    0.000000
          1  shared_store_transactions_per_request  Shared Memory Store Transactions Per Request  0.000000    0.000000    0.000000
    Kernel: primeGPU(int)
          1  dram_read_throughput                   Device Memory Read Throughput                 291.45MB/s  291.45MB/s  291.45MB/s
          1  dram_write_throughput                  Device Memory Write Throughput                36.893MB/s  36.893MB/s  36.893MB/s
          1  shared_load_transactions_per_request   Shared Memory Load Transactions Per Request   0.000000    0.000000    0.000000
          1  shared_store_transactions_per_request  Shared Memory Store Transactions Per Request  0.000000    0.000000    0.000000
*/
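The grid size (N + block.x - 1) / block.x used above is the usual integer ceiling division, which guarantees blocks * block.x >= N candidate indices. A trivial standalone check (plain C++, values arbitrary):

#include <cstdio>

int main()
{
    const int B = 512;                        // threads per block
    int Ns[] = {1, 511, 512, 513, 831595};    // sample problem sizes
    for (int t = 0; t < 5; t++) {
        int N = Ns[t];
        int blocks = (N + B - 1) / B;         // ceiling division
        printf("N=%6d -> %4d blocks, %7d threads (covers N: %s)\n",
               N, blocks, blocks * B, blocks * B >= N ? "yes" : "no");
    }
    return 0;
}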
5a5df1e2a5e7164effcf9452af518ea9f9e8dce7.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * Copyright (c) 2019-2020, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <common/cudart_utils.h>
#include <decisiontree/decisiontree_impl.h>
#include <gtest/gtest.h>
#include <linalg/gemv.h>
#include <linalg/transpose.h>
#include <sys/stat.h>
#include <test_utils.h>
#include <treelite/c_api.h>
#include <treelite/c_api_runtime.h>
#include <cstdlib>
#include <cuda_utils.cuh>
#include <cuml/ensemble/randomforest.hpp>
#include <fstream>
#include <iostream>
#include <limits>
#include <random/rng.cuh>
#include <string>

namespace ML {

using namespace MLCommon;

template <typename T>  // template useless for now.
struct RfInputs {
  int n_rows;
  int n_cols;
  int n_trees;
  float max_features;
  float rows_sample;
  int n_inference_rows;
  int max_depth;
  int max_leaves;
  bool bootstrap;
  bool bootstrap_features;
  int n_bins;
  int split_algo;
  int min_rows_per_node;
  float min_impurity_decrease;
  int n_streams;
  CRITERION split_criterion;
};

template <typename T>
::std::ostream &operator<<(::std::ostream &os, const RfInputs<T> &dims) {
  return os;
}

template <typename T, typename L>
class RfTreeliteTestCommon : public ::testing::TestWithParam<RfInputs<T>> {
 protected:
  void ConcatenateTreeliteModels() {
    // Test the implementation for converting a fitted forest into treelite format.
    ModelHandle concatenated_forest_handle;
    concatenated_forest_handle = concatenate_trees(treelite_indiv_handles);
    compare_concat_forest_to_subforests(concatenated_forest_handle,
                                        treelite_indiv_handles);

    std::string test_name =
      ::testing::UnitTest::GetInstance()->current_test_info()->name();
    // Get the test index from Google current_test_info.
    // The test index is the string after '/' in test_name.
    std::string index_str =
      test_name.substr(test_name.find("/") + 1, test_name.length());

    // Create a directory if the test is the first one in the test case.
    int mkdir_ret = mkdir(test_dir.c_str(), 0700);
    if (mkdir_ret != 0) {
      // Ignore the error if it is caused by EEXIST.
      // Treelite will generate errors when the directory is not accessible.
      ASSERT(errno == EEXIST, "Call mkdir %s fails.", test_dir.c_str());
    }
    // Create a sub-directory for the test case.
    dir_name = test_dir + index_str;

    CompilerHandle compiler;
    // "ast_native" is the default compiler treelite uses in its Python code.
    TREELITE_CHECK(TreeliteCompilerCreate("ast_native", &compiler));

    int verbose = 0;
    // Generate C code in the directory specified below.
    // Parallel compilation is disabled; to enable it, one needs to specify
    // parallel_comp of CompilerHandle.
    // Treelite will create the directory if it doesn't exist.
    TREELITE_CHECK(TreeliteCompilerGenerateCode(
      compiler, treelite_indiv_handles[0], verbose, dir_name.c_str()));
    TREELITE_CHECK(TreeliteCompilerFree(compiler));

    // Options copied from
    // https://github.com/dmlc/treelite/blob/528d883f8f39eb5dd633e929b95915b63e210b39/python/treelite/contrib/__init__.py.
    std::string obj_cmd = "gcc -c -O3 -o " + dir_name + "/main.o " + dir_name +
                          "/main.c -fPIC "
                          "-std=c99 -lm";
    std::string lib_cmd = "gcc -shared -O3 -o " + dir_name +
                          "/treelite_model.so " + dir_name +
                          "/main.o -std=c99 -lm";

    ASSERT(system(obj_cmd.c_str()) == 0, "Call %s fails.", obj_cmd.c_str());
    ASSERT(system(lib_cmd.c_str()) == 0, "Call %s fails.", lib_cmd.c_str());

    PredictorHandle predictor;
    std::string lib_path = dir_name + "/treelite_model.so";

    // -1 means use the maximum possible number of worker threads.
    int worker_thread = -1;
    TREELITE_CHECK(
      TreelitePredictorLoad(lib_path.c_str(), worker_thread, &predictor));

    DenseBatchHandle dense_batch;
    // The current RF doesn't seem to support missing values; put NaN to be safe.
    float missing_value = std::numeric_limits<double>::quiet_NaN();
    TREELITE_CHECK(TreeliteAssembleDenseBatch(
      inference_data_h.data(), missing_value, params.n_inference_rows,
      params.n_cols, &dense_batch));

    // Use a dense batch, so batch_sparse is 0.
    // pred_margin = true means producing raw margins rather than transformed
    // probabilities.
    int batch_sparse = 0;
    bool pred_margin = false;
    // Allocate a larger array for treelite predicted labels when using
    // multi-class classification to avoid seg faults, although later we only
    // use the first params.n_inference_rows elements.
    size_t treelite_predicted_labels_size;

    TREELITE_CHECK(TreelitePredictorPredictBatch(
      predictor, dense_batch, batch_sparse, verbose, pred_margin,
      treelite_predicted_labels.data(), &treelite_predicted_labels_size));

    TREELITE_CHECK(TreeliteDeleteDenseBatch(dense_batch));
    TREELITE_CHECK(TreelitePredictorFree(predictor));
    TREELITE_CHECK(TreeliteFreeModel(concatenated_forest_handle));
    TREELITE_CHECK(TreeliteFreeModel(treelite_indiv_handles[0]));
    TREELITE_CHECK(TreeliteFreeModel(treelite_indiv_handles[1]));
    TREELITE_CHECK(TreeliteFreeModel(treelite_indiv_handles[2]));
  }

  void getResultAndCheck() {
    // Predict and compare against known labels.
    predict(*handle, forest, inference_data_d, params.n_inference_rows,
            params.n_cols, predicted_labels_d);
    RF_metrics tmp = score(*handle, forest, labels_d, params.n_inference_rows,
                           predicted_labels_d);
    CUDA_CHECK(hipStreamSynchronize(stream));

    predicted_labels_h.resize(params.n_inference_rows);
    ref_predicted_labels.resize(params.n_inference_rows);

    updateHost(predicted_labels_h.data(), predicted_labels_d,
               params.n_inference_rows, stream);
    CUDA_CHECK(hipStreamSynchronize(stream));

    for (int i = 0; i < params.n_inference_rows; i++) {
      if (is_classification) {
        ref_predicted_labels[i] = static_cast<float>(predicted_labels_h[i]);
        treelite_predicted_labels[i] =
          treelite_predicted_labels[i] >= 0.5 ? 1 : 0;
      } else {
        ref_predicted_labels[i] = static_cast<float>(predicted_labels_h[i]);
      }
    }

    EXPECT_TRUE(devArrMatchHost(
      ref_predicted_labels.data(), treelite_predicted_labels.data(),
      params.n_inference_rows, Compare<float>(), stream));
  }

  void SetUp() override {
    params = ::testing::TestWithParam<RfInputs<T>>::GetParam();

    DecisionTree::DecisionTreeParams tree_params;
    set_tree_params(tree_params, params.max_depth, params.max_leaves,
                    params.max_features, params.n_bins, params.split_algo,
                    params.min_rows_per_node, params.min_impurity_decrease,
                    params.bootstrap_features, params.split_criterion, false);
    set_all_rf_params(rf_params, params.n_trees, params.bootstrap,
                      params.rows_sample, -1, params.n_streams, tree_params);
    handle.reset(new raft::handle_t(rf_params.n_streams));

    data_len = params.n_rows * params.n_cols;
    inference_data_len = params.n_inference_rows * params.n_cols;

    allocate(data_d, data_len);
    allocate(inference_data_d, inference_data_len);

    allocate(labels_d, params.n_rows);
    allocate(predicted_labels_d, params.n_inference_rows);

    treelite_predicted_labels.resize(params.n_inference_rows);
    ref_predicted_labels.resize(params.n_inference_rows);

    CUDA_CHECK(hipStreamCreate(&stream));
    handle->set_stream(stream);

    forest = new typename ML::RandomForestMetaData<T, L>;
    null_trees_ptr(forest);
    forest_2 = new typename ML::RandomForestMetaData<T, L>;
    null_trees_ptr(forest_2);
    forest_3 = new typename ML::RandomForestMetaData<T, L>;
    null_trees_ptr(forest_3);
    all_forest_info = {forest, forest_2, forest_3};
    data_h.resize(data_len);
    inference_data_h.resize(inference_data_len);

    // Random number generator.
    raft::random::Rng r1(1234ULL);
    // Generate data_d, which is in column major order.
    r1.uniform(data_d, data_len, T(0.0), T(10.0), stream);
    raft::random::Rng r2(4321ULL);
    // Generate inference_data_d, which is in row major order.
r2.uniform(inference_data_d, inference_data_len, T(0.0), T(10.0), stream); updateHost(data_h.data(), data_d, data_len, stream); updateHost(inference_data_h.data(), inference_data_d, inference_data_len, stream); CUDA_CHECK(hipStreamSynchronize(stream)); } void TearDown() override { CUDA_CHECK(hipStreamDestroy(stream)); CUDA_CHECK(hipFree(data_d)); CUDA_CHECK(hipFree(inference_data_d)); CUDA_CHECK(hipFree(labels_d)); CUDA_CHECK(hipFree(predicted_labels_d)); delete forest; delete forest_2; delete forest_3; all_forest_info.clear(); labels_h.clear(); predicted_labels_h.clear(); data_h.clear(); inference_data_h.clear(); treelite_predicted_labels.clear(); ref_predicted_labels.clear(); treelite_indiv_handles.clear(); } protected: RfInputs<T> params; RF_params rf_params; T *data_d, *inference_data_d; std::vector<T> data_h; std::vector<T> inference_data_h; std::vector<ModelHandle> treelite_indiv_handles; // Set to 1 for regression and 2 for binary classification // #class for multi-classification int task_category; int is_classification; int data_len; int inference_data_len; hipStream_t stream; std::shared_ptr<raft::handle_t> handle; std::vector<float> treelite_predicted_labels; std::vector<float> ref_predicted_labels; std::vector<ML::RandomForestMetaData<T, L> *> all_forest_info; std::string test_dir; std::string dir_name; L *labels_d, *predicted_labels_d; std::vector<L> labels_h; std::vector<L> predicted_labels_h; RandomForestMetaData<T, L> *forest; RandomForestMetaData<T, L> *forest_2; RandomForestMetaData<T, L> *forest_3; }; // namespace ML template <typename T, typename L> class RfConcatTestClf : public RfTreeliteTestCommon<T, L> { protected: void testClassifier() { this->test_dir = "./concat_test_clf/"; this->is_classification = 1; //task_category - 1 for regression, 2 for binary classification // #class for multi-class classification this->task_category = 2; float *weight, *temp_label_d, *temp_data_d; std::vector<float> temp_label_h; allocate(weight, this->params.n_cols); allocate(temp_label_d, this->params.n_rows); allocate(temp_data_d, this->data_len); raft::random::Rng r(1234ULL); // Generate weight for each feature. r.uniform(weight, this->params.n_cols, T(0.0), T(1.0), this->stream); // Generate noise. r.uniform(temp_label_d, this->params.n_rows, T(0.0), T(10.0), this->stream); LinAlg::transpose<float>(this->data_d, temp_data_d, this->params.n_rows, this->params.n_cols, this->handle->get_cublas_handle(), this->stream); LinAlg::gemv<float>(temp_data_d, this->params.n_cols, this->params.n_rows, weight, temp_label_d, true, 1.f, 1.f, this->handle->get_cublas_handle(), this->stream); temp_label_h.resize(this->params.n_rows); updateHost(temp_label_h.data(), temp_label_d, this->params.n_rows, this->stream); CUDA_CHECK(hipStreamSynchronize(this->stream)); int value; for (int i = 0; i < this->params.n_rows; i++) { // The value of temp_label is between 0 to 10*n_cols+noise_level(10). // Choose half of that as the theshold to balance two classes. 
if (temp_label_h[i] >= (10 * this->params.n_cols + 10) / 2.0) { value = 1; } else { value = 0; } this->labels_h.push_back(value); } updateDevice(this->labels_d, this->labels_h.data(), this->params.n_rows, this->stream); preprocess_labels(this->params.n_rows, this->labels_h, labels_map); for (int i = 0; i < 3; i++) { ModelHandle model; this->rf_params.n_trees = this->rf_params.n_trees + i; fit(*(this->handle), this->all_forest_info[i], this->data_d, this->params.n_rows, this->params.n_cols, this->labels_d, labels_map.size(), this->rf_params); build_treelite_forest(&model, this->all_forest_info[i], this->params.n_cols, this->task_category); this->treelite_indiv_handles.push_back(model); } CUDA_CHECK(hipStreamSynchronize(this->stream)); this->ConcatenateTreeliteModels(); this->getResultAndCheck(); postprocess_labels(this->params.n_rows, this->labels_h, this->labels_map); labels_map.clear(); temp_label_h.clear(); CUDA_CHECK(hipFree(weight)); CUDA_CHECK(hipFree(temp_label_d)); CUDA_CHECK(hipFree(temp_data_d)); } protected: std::map<int, int> labels_map; //unique map of labels to int vals starting from 0 }; //------------------------------------------------------------------------------------------------------------------------------------- template <typename T, typename L> class RfConcatTestReg : public RfTreeliteTestCommon<T, L> { protected: void testRegressor() { this->test_dir = "./concat_test_reg/"; this->is_classification = 0; // task_category - 1 for regression, 2 for binary classification // #class for multi-class classification this->task_category = 1; float *weight, *temp_data_d; allocate(weight, this->params.n_cols); allocate(temp_data_d, this->data_len); raft::random::Rng r(1234ULL); // Generate weight for each feature. r.uniform(weight, this->params.n_cols, T(0.0), T(1.0), this->stream); // Generate noise. 
r.uniform(this->labels_d, this->params.n_rows, T(0.0), T(10.0), this->stream); LinAlg::transpose<float>(this->data_d, temp_data_d, this->params.n_rows, this->params.n_cols, this->handle->get_cublas_handle(), this->stream); LinAlg::gemv<float>(temp_data_d, this->params.n_cols, this->params.n_rows, weight, this->labels_d, true, 1.f, 1.f, this->handle->get_cublas_handle(), this->stream); this->labels_h.resize(this->params.n_rows); updateHost(this->labels_h.data(), this->labels_d, this->params.n_rows, this->stream); CUDA_CHECK(hipStreamSynchronize(this->stream)); for (int i = 0; i < 3; i++) { ModelHandle model; this->rf_params.n_trees = this->rf_params.n_trees + i; fit(*(this->handle), this->all_forest_info[i], this->data_d, this->params.n_rows, this->params.n_cols, this->labels_d, this->rf_params); build_treelite_forest(&model, this->all_forest_info[i], this->params.n_cols, this->task_category); CUDA_CHECK(hipStreamSynchronize(this->stream)); this->treelite_indiv_handles.push_back(model); } this->ConcatenateTreeliteModels(); this->getResultAndCheck(); CUDA_CHECK(hipFree(weight)); CUDA_CHECK(hipFree(temp_data_d)); } }; // //------------------------------------------------------------------------------------------------------------------------------------- const std::vector<RfInputs<float>> inputsf2_clf = { {4, 2, 1, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2, CRITERION::GINI}, // single tree forest, bootstrap false, depth 8, 4 bins {4, 2, 1, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2, CRITERION::GINI}, // single tree forest, bootstrap false, depth of 8, 4 bins {4, 2, 10, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2, CRITERION:: GINI}, //forest with 10 trees, all trees should produce identical predictions (no bootstrapping or column subsampling) {4, 2, 10, 0.8f, 0.8f, 4, 8, -1, true, false, 3, SPLIT_ALGO::HIST, 2, 0.0, 2, CRITERION:: GINI}, //forest with 10 trees, with bootstrap and column subsampling enabled, 3 bins {4, 2, 10, 0.8f, 0.8f, 4, 8, -1, true, false, 3, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 0.0, 2, CRITERION:: CRITERION_END}, //forest with 10 trees, with bootstrap and column subsampling enabled, 3 bins, different split algorithm {4, 2, 1, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2, CRITERION::ENTROPY}, {4, 2, 1, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2, CRITERION::ENTROPY}, {4, 2, 10, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2, CRITERION::ENTROPY}, {4, 2, 10, 0.8f, 0.8f, 4, 8, -1, true, false, 3, SPLIT_ALGO::HIST, 2, 0.0, 2, CRITERION::ENTROPY}, {4, 2, 10, 0.8f, 0.8f, 4, 8, -1, true, false, 3, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 0.0, 2, CRITERION::ENTROPY}}; typedef RfConcatTestClf<float, int> RfClassifierConcatTestF; TEST_P(RfClassifierConcatTestF, Convert_Clf) { testClassifier(); } INSTANTIATE_TEST_CASE_P(RfBinaryClassifierConcatTests, RfClassifierConcatTestF, ::testing::ValuesIn(inputsf2_clf)); const std::vector<RfInputs<float>> inputsf2_reg = { {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2, CRITERION::MSE}, {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2, CRITERION::MSE}, {4, 2, 5, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2, CRITERION:: CRITERION_END}, // CRITERION_END uses the default criterion (GINI for classification, MSE for regression) {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2, CRITERION::MAE}, {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, 
false, false, 4, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 0.0, 2, CRITERION::MAE}, {4, 2, 5, 1.0f, 1.0f, 4, 7, -1, true, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2, CRITERION::CRITERION_END}}; typedef RfConcatTestReg<float, float> RfRegressorConcatTestF; TEST_P(RfRegressorConcatTestF, Convert_Reg) { testRegressor(); } INSTANTIATE_TEST_CASE_P(RfRegressorConcatTests, RfRegressorConcatTestF, ::testing::ValuesIn(inputsf2_reg)); } // end namespace ML
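/* The test above leans on CUDA_CHECK and ASSERT from common/cudart_utils.h
   and test_utils.h, which are not reproduced in this pair. Below is a
   minimal sketch of what such a status-checking macro usually looks like
   (an assumption, not cuML's exact definition). Note that in the hipified
   file the unchanged CUDA_CHECK name ends up wrapping hip* calls such as
   hipStreamSynchronize, so a HIP build needs an equivalent macro taking
   hipError_t. */
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CUDA_CHECK_SKETCH(call)                                   \
  do {                                                            \
    cudaError_t err_ = (call);                                    \
    if (err_ != cudaSuccess) {                                    \
      fprintf(stderr, "CUDA error '%s' at %s:%d\n",               \
              cudaGetErrorString(err_), __FILE__, __LINE__);      \
      exit(EXIT_FAILURE);                                         \
    }                                                             \
  } while (0)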
5a5df1e2a5e7164effcf9452af518ea9f9e8dce7.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <decisiontree/decisiontree_impl.h> #include <gtest/gtest.h> #include <linalg/gemv.h> #include <linalg/transpose.h> #include <sys/stat.h> #include <test_utils.h> #include <treelite/c_api.h> #include <treelite/c_api_runtime.h> #include <cstdlib> #include <cuda_utils.cuh> #include <cuml/ensemble/randomforest.hpp> #include <fstream> #include <iostream> #include <limits> #include <random/rng.cuh> #include <string> namespace ML { using namespace MLCommon; template <typename T> // template useless for now. struct RfInputs { int n_rows; int n_cols; int n_trees; float max_features; float rows_sample; int n_inference_rows; int max_depth; int max_leaves; bool bootstrap; bool bootstrap_features; int n_bins; int split_algo; int min_rows_per_node; float min_impurity_decrease; int n_streams; CRITERION split_criterion; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const RfInputs<T> &dims) { return os; } template <typename T, typename L> class RfTreeliteTestCommon : public ::testing::TestWithParam<RfInputs<T>> { protected: void ConcatenateTreeliteModels() { // Test the implementation for converting fitted forest into treelite format. ModelHandle concatenated_forest_handle; concatenated_forest_handle = concatenate_trees(treelite_indiv_handles); compare_concat_forest_to_subforests(concatenated_forest_handle, treelite_indiv_handles); std::string test_name = ::testing::UnitTest::GetInstance()->current_test_info()->name(); // Get the test index from Google current_test_info. // The test index is the string after '/' in test_name. std::string index_str = test_name.substr(test_name.find("/") + 1, test_name.length()); // Create a directory if the test is the first one in the test case. int mkdir_ret = mkdir(test_dir.c_str(), 0700); if (mkdir_ret != 0) { // Ignore the error if the error is caused by EEXIST. // Treelite will generate errors when the directory is not accessible. ASSERT(errno == EEXIST, "Call mkdir %s fails.", test_dir.c_str()); } // Create a sub-directory for the test case. dir_name = test_dir + index_str; CompilerHandle compiler; // "ast_navive" is the default compiler treelite used in their Python code. TREELITE_CHECK(TreeliteCompilerCreate("ast_native", &compiler)); int verbose = 0; // Generate C code in the directory specified below. // The parallel comilplation is disabled. To enable it, one needs to specify parallel_comp of CompilerHandle. // Treelite will create a directory if it doesn't exist. TREELITE_CHECK(TreeliteCompilerGenerateCode( compiler, treelite_indiv_handles[0], verbose, dir_name.c_str())); TREELITE_CHECK(TreeliteCompilerFree(compiler)); // Options copied from // https://github.com/dmlc/treelite/blob/528d883f8f39eb5dd633e929b95915b63e210b39/python/treelite/contrib/__init__.py. 
std::string obj_cmd = "gcc -c -O3 -o " + dir_name + "/main.o " + dir_name + "/main.c -fPIC " "-std=c99 -lm"; std::string lib_cmd = "gcc -shared -O3 -o " + dir_name + "/treelite_model.so " + dir_name + "/main.o -std=c99 -lm"; ASSERT(system(obj_cmd.c_str()) == 0, "Call %s fails.", obj_cmd.c_str()); ASSERT(system(lib_cmd.c_str()) == 0, "Call %s fails.", lib_cmd.c_str()); PredictorHandle predictor; std::string lib_path = dir_name + "/treelite_model.so"; // -1 means use maximum possible worker threads. int worker_thread = -1; TREELITE_CHECK( TreelitePredictorLoad(lib_path.c_str(), worker_thread, &predictor)); DenseBatchHandle dense_batch; // Current RF dosen't seem to support missing value, put NaN to be safe. float missing_value = std::numeric_limits<double>::quiet_NaN(); TREELITE_CHECK(TreeliteAssembleDenseBatch( inference_data_h.data(), missing_value, params.n_inference_rows, params.n_cols, &dense_batch)); // Use dense batch so batch_sparse is 0. // pred_margin = true means to produce raw margins rather than transformed probability. int batch_sparse = 0; bool pred_margin = false; // Allocate larger array for treelite predicted label with using multi-class classification to avoid seg faults. // Altough later we only use first params.n_inference_rows elements. size_t treelite_predicted_labels_size; TREELITE_CHECK(TreelitePredictorPredictBatch( predictor, dense_batch, batch_sparse, verbose, pred_margin, treelite_predicted_labels.data(), &treelite_predicted_labels_size)); TREELITE_CHECK(TreeliteDeleteDenseBatch(dense_batch)); TREELITE_CHECK(TreelitePredictorFree(predictor)); TREELITE_CHECK(TreeliteFreeModel(concatenated_forest_handle)); TREELITE_CHECK(TreeliteFreeModel(treelite_indiv_handles[0])); TREELITE_CHECK(TreeliteFreeModel(treelite_indiv_handles[1])); TREELITE_CHECK(TreeliteFreeModel(treelite_indiv_handles[2])); } void getResultAndCheck() { // Predict and compare against known labels predict(*handle, forest, inference_data_d, params.n_inference_rows, params.n_cols, predicted_labels_d); RF_metrics tmp = score(*handle, forest, labels_d, params.n_inference_rows, predicted_labels_d); CUDA_CHECK(cudaStreamSynchronize(stream)); predicted_labels_h.resize(params.n_inference_rows); ref_predicted_labels.resize(params.n_inference_rows); updateHost(predicted_labels_h.data(), predicted_labels_d, params.n_inference_rows, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); for (int i = 0; i < params.n_inference_rows; i++) { if (is_classification) { ref_predicted_labels[i] = static_cast<float>(predicted_labels_h[i]); treelite_predicted_labels[i] = treelite_predicted_labels[i] >= 0.5 ? 
1 : 0; } else { ref_predicted_labels[i] = static_cast<float>(predicted_labels_h[i]); } } EXPECT_TRUE(devArrMatchHost( ref_predicted_labels.data(), treelite_predicted_labels.data(), params.n_inference_rows, Compare<float>(), stream)); } void SetUp() override { params = ::testing::TestWithParam<RfInputs<T>>::GetParam(); DecisionTree::DecisionTreeParams tree_params; set_tree_params(tree_params, params.max_depth, params.max_leaves, params.max_features, params.n_bins, params.split_algo, params.min_rows_per_node, params.min_impurity_decrease, params.bootstrap_features, params.split_criterion, false); set_all_rf_params(rf_params, params.n_trees, params.bootstrap, params.rows_sample, -1, params.n_streams, tree_params); handle.reset(new raft::handle_t(rf_params.n_streams)); data_len = params.n_rows * params.n_cols; inference_data_len = params.n_inference_rows * params.n_cols; allocate(data_d, data_len); allocate(inference_data_d, inference_data_len); allocate(labels_d, params.n_rows); allocate(predicted_labels_d, params.n_inference_rows); treelite_predicted_labels.resize(params.n_inference_rows); ref_predicted_labels.resize(params.n_inference_rows); CUDA_CHECK(cudaStreamCreate(&stream)); handle->set_stream(stream); forest = new typename ML::RandomForestMetaData<T, L>; null_trees_ptr(forest); forest_2 = new typename ML::RandomForestMetaData<T, L>; null_trees_ptr(forest_2); forest_3 = new typename ML::RandomForestMetaData<T, L>; null_trees_ptr(forest_3); all_forest_info = {forest, forest_2, forest_3}; data_h.resize(data_len); inference_data_h.resize(inference_data_len); // Random number generator. raft::random::Rng r1(1234ULL); // Generate data_d is in column major order. r1.uniform(data_d, data_len, T(0.0), T(10.0), stream); raft::random::Rng r2(4321ULL); // Generate inference_data_d which is in row major order. 
r2.uniform(inference_data_d, inference_data_len, T(0.0), T(10.0), stream); updateHost(data_h.data(), data_d, data_len, stream); updateHost(inference_data_h.data(), inference_data_d, inference_data_len, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); } void TearDown() override { CUDA_CHECK(cudaStreamDestroy(stream)); CUDA_CHECK(cudaFree(data_d)); CUDA_CHECK(cudaFree(inference_data_d)); CUDA_CHECK(cudaFree(labels_d)); CUDA_CHECK(cudaFree(predicted_labels_d)); delete forest; delete forest_2; delete forest_3; all_forest_info.clear(); labels_h.clear(); predicted_labels_h.clear(); data_h.clear(); inference_data_h.clear(); treelite_predicted_labels.clear(); ref_predicted_labels.clear(); treelite_indiv_handles.clear(); } protected: RfInputs<T> params; RF_params rf_params; T *data_d, *inference_data_d; std::vector<T> data_h; std::vector<T> inference_data_h; std::vector<ModelHandle> treelite_indiv_handles; // Set to 1 for regression and 2 for binary classification // #class for multi-classification int task_category; int is_classification; int data_len; int inference_data_len; cudaStream_t stream; std::shared_ptr<raft::handle_t> handle; std::vector<float> treelite_predicted_labels; std::vector<float> ref_predicted_labels; std::vector<ML::RandomForestMetaData<T, L> *> all_forest_info; std::string test_dir; std::string dir_name; L *labels_d, *predicted_labels_d; std::vector<L> labels_h; std::vector<L> predicted_labels_h; RandomForestMetaData<T, L> *forest; RandomForestMetaData<T, L> *forest_2; RandomForestMetaData<T, L> *forest_3; }; // namespace ML template <typename T, typename L> class RfConcatTestClf : public RfTreeliteTestCommon<T, L> { protected: void testClassifier() { this->test_dir = "./concat_test_clf/"; this->is_classification = 1; //task_category - 1 for regression, 2 for binary classification // #class for multi-class classification this->task_category = 2; float *weight, *temp_label_d, *temp_data_d; std::vector<float> temp_label_h; allocate(weight, this->params.n_cols); allocate(temp_label_d, this->params.n_rows); allocate(temp_data_d, this->data_len); raft::random::Rng r(1234ULL); // Generate weight for each feature. r.uniform(weight, this->params.n_cols, T(0.0), T(1.0), this->stream); // Generate noise. r.uniform(temp_label_d, this->params.n_rows, T(0.0), T(10.0), this->stream); LinAlg::transpose<float>(this->data_d, temp_data_d, this->params.n_rows, this->params.n_cols, this->handle->get_cublas_handle(), this->stream); LinAlg::gemv<float>(temp_data_d, this->params.n_cols, this->params.n_rows, weight, temp_label_d, true, 1.f, 1.f, this->handle->get_cublas_handle(), this->stream); temp_label_h.resize(this->params.n_rows); updateHost(temp_label_h.data(), temp_label_d, this->params.n_rows, this->stream); CUDA_CHECK(cudaStreamSynchronize(this->stream)); int value; for (int i = 0; i < this->params.n_rows; i++) { // The value of temp_label is between 0 to 10*n_cols+noise_level(10). // Choose half of that as the theshold to balance two classes. 
if (temp_label_h[i] >= (10 * this->params.n_cols + 10) / 2.0) { value = 1; } else { value = 0; } this->labels_h.push_back(value); } updateDevice(this->labels_d, this->labels_h.data(), this->params.n_rows, this->stream); preprocess_labels(this->params.n_rows, this->labels_h, labels_map); for (int i = 0; i < 3; i++) { ModelHandle model; this->rf_params.n_trees = this->rf_params.n_trees + i; fit(*(this->handle), this->all_forest_info[i], this->data_d, this->params.n_rows, this->params.n_cols, this->labels_d, labels_map.size(), this->rf_params); build_treelite_forest(&model, this->all_forest_info[i], this->params.n_cols, this->task_category); this->treelite_indiv_handles.push_back(model); } CUDA_CHECK(cudaStreamSynchronize(this->stream)); this->ConcatenateTreeliteModels(); this->getResultAndCheck(); postprocess_labels(this->params.n_rows, this->labels_h, this->labels_map); labels_map.clear(); temp_label_h.clear(); CUDA_CHECK(cudaFree(weight)); CUDA_CHECK(cudaFree(temp_label_d)); CUDA_CHECK(cudaFree(temp_data_d)); } protected: std::map<int, int> labels_map; //unique map of labels to int vals starting from 0 }; //------------------------------------------------------------------------------------------------------------------------------------- template <typename T, typename L> class RfConcatTestReg : public RfTreeliteTestCommon<T, L> { protected: void testRegressor() { this->test_dir = "./concat_test_reg/"; this->is_classification = 0; // task_category - 1 for regression, 2 for binary classification // #class for multi-class classification this->task_category = 1; float *weight, *temp_data_d; allocate(weight, this->params.n_cols); allocate(temp_data_d, this->data_len); raft::random::Rng r(1234ULL); // Generate weight for each feature. r.uniform(weight, this->params.n_cols, T(0.0), T(1.0), this->stream); // Generate noise. 
r.uniform(this->labels_d, this->params.n_rows, T(0.0), T(10.0), this->stream); LinAlg::transpose<float>(this->data_d, temp_data_d, this->params.n_rows, this->params.n_cols, this->handle->get_cublas_handle(), this->stream); LinAlg::gemv<float>(temp_data_d, this->params.n_cols, this->params.n_rows, weight, this->labels_d, true, 1.f, 1.f, this->handle->get_cublas_handle(), this->stream); this->labels_h.resize(this->params.n_rows); updateHost(this->labels_h.data(), this->labels_d, this->params.n_rows, this->stream); CUDA_CHECK(cudaStreamSynchronize(this->stream)); for (int i = 0; i < 3; i++) { ModelHandle model; this->rf_params.n_trees = this->rf_params.n_trees + i; fit(*(this->handle), this->all_forest_info[i], this->data_d, this->params.n_rows, this->params.n_cols, this->labels_d, this->rf_params); build_treelite_forest(&model, this->all_forest_info[i], this->params.n_cols, this->task_category); CUDA_CHECK(cudaStreamSynchronize(this->stream)); this->treelite_indiv_handles.push_back(model); } this->ConcatenateTreeliteModels(); this->getResultAndCheck(); CUDA_CHECK(cudaFree(weight)); CUDA_CHECK(cudaFree(temp_data_d)); } }; // //------------------------------------------------------------------------------------------------------------------------------------- const std::vector<RfInputs<float>> inputsf2_clf = { {4, 2, 1, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2, CRITERION::GINI}, // single tree forest, bootstrap false, depth 8, 4 bins {4, 2, 1, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2, CRITERION::GINI}, // single tree forest, bootstrap false, depth of 8, 4 bins {4, 2, 10, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2, CRITERION:: GINI}, //forest with 10 trees, all trees should produce identical predictions (no bootstrapping or column subsampling) {4, 2, 10, 0.8f, 0.8f, 4, 8, -1, true, false, 3, SPLIT_ALGO::HIST, 2, 0.0, 2, CRITERION:: GINI}, //forest with 10 trees, with bootstrap and column subsampling enabled, 3 bins {4, 2, 10, 0.8f, 0.8f, 4, 8, -1, true, false, 3, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 0.0, 2, CRITERION:: CRITERION_END}, //forest with 10 trees, with bootstrap and column subsampling enabled, 3 bins, different split algorithm {4, 2, 1, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2, CRITERION::ENTROPY}, {4, 2, 1, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2, CRITERION::ENTROPY}, {4, 2, 10, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2, CRITERION::ENTROPY}, {4, 2, 10, 0.8f, 0.8f, 4, 8, -1, true, false, 3, SPLIT_ALGO::HIST, 2, 0.0, 2, CRITERION::ENTROPY}, {4, 2, 10, 0.8f, 0.8f, 4, 8, -1, true, false, 3, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 0.0, 2, CRITERION::ENTROPY}}; typedef RfConcatTestClf<float, int> RfClassifierConcatTestF; TEST_P(RfClassifierConcatTestF, Convert_Clf) { testClassifier(); } INSTANTIATE_TEST_CASE_P(RfBinaryClassifierConcatTests, RfClassifierConcatTestF, ::testing::ValuesIn(inputsf2_clf)); const std::vector<RfInputs<float>> inputsf2_reg = { {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2, CRITERION::MSE}, {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2, CRITERION::MSE}, {4, 2, 5, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2, CRITERION:: CRITERION_END}, // CRITERION_END uses the default criterion (GINI for classification, MSE for regression) {4, 2, 1, 1.0f, 1.0f, 4, 7, -1, false, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2, CRITERION::MAE}, {4, 2, 1, 1.0f, 1.0f, 4, 7, 
-1, false, false, 4, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 0.0, 2, CRITERION::MAE}, {4, 2, 5, 1.0f, 1.0f, 4, 7, -1, true, false, 4, SPLIT_ALGO::HIST, 2, 0.0, 2, CRITERION::CRITERION_END}}; typedef RfConcatTestReg<float, float> RfRegressorConcatTestF; TEST_P(RfRegressorConcatTestF, Convert_Reg) { testRegressor(); } INSTANTIATE_TEST_CASE_P(RfRegressorConcatTests, RfRegressorConcatTestF, ::testing::ValuesIn(inputsf2_reg)); } // end namespace ML
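/* Diffing this .cu against its .hip counterpart above, hipify's changes in
   this file are purely mechanical identifier renames; the stream-handling
   lines illustrate the pattern. A tiny standalone example of the same
   mapping (compiles as-is with nvcc): */
#include <cuda_runtime.h>

int main() {
  cudaStream_t s;            // hipify: hipStream_t s;
  cudaStreamCreate(&s);      // hipify: hipStreamCreate(&s);
  cudaStreamSynchronize(s);  // hipify: hipStreamSynchronize(s);
  cudaStreamDestroy(s);      // hipify: hipStreamDestroy(s);
  return 0;
}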
c011397a0b73809d3bc38a3c3e5d0f55d0c4a76e.hip
// !!! This is a file automatically generated by hipify!!! #include "encoder.h" #include "kernels/transformerKernels.h" #include "kernels/multilgKernels.h" /** @file Transformer encoder, composed by gemm lib and custom cuda kernel function */ namespace lightseq { namespace cuda { template <OperationType OpType_> Encoder<OpType_>::Encoder(int max_batch_size, const int *p_d_token_id, int *p_d_padding_mask, _DataType *p_d_output, const TransformerWeight<OpType_> &tw, hipStream_t stream, hipblasHandle_t hd) : _max_batch_size(max_batch_size), _p_d_token_id(p_d_token_id), _p_d_padding_mask(p_d_padding_mask), _p_d_output(p_d_output), _tw(tw), _stream(stream), _hd(hd), _p_d_src_emb_wei(tw.get_src_emb_wei()), _p_d_enc_wei(tw.get_enc_wei()), _fone((_DataType)1.f), _fzero((_DataType)0.f), _atten_scaler((_DataType)sqrt(1.f / tw._dim_per_head)), _max_batch_dim(max_batch_size * tw._max_step * tw._hidden_size), _max_thread_per_block(1024) {} /** Compute GPU memory size needed by transformer encoder, to see how these memory is used, checkout init_buffer() for detail */ template <OperationType OpType_> long Encoder<OpType_>::compute_buffer_bytesize() { long sz1 = _max_batch_dim * 6 + _max_batch_size * _tw._head_num * _tw._max_step * _tw._max_step; long sz2 = _max_batch_dim + _max_batch_size * _tw._max_step * _tw._inner_size; return max(sz1, sz2) * sizeof(_DataType); } /** Init the GPU memory pointer which point to the memory buffer needed by encoder. These buffer are used during custom cuda kernel function, find the corresponding function to see how these buffer are used */ template <OperationType OpType_> void Encoder<OpType_>::init_buffer(void *pbuf) { _DataType *p_d_buf = reinterpret_cast<_DataType *>(pbuf); _p_d_qkv_projected = p_d_buf; _p_d_q = _p_d_qkv_projected + _max_batch_dim * 3; _p_d_k = _p_d_q + _max_batch_dim; _p_d_v = _p_d_k + _max_batch_dim; _p_d_c = _p_d_v + _max_batch_dim; _p_d_ffn_buf1 = p_d_buf; _p_d_ffn_buf2 = _p_d_ffn_buf1 + _max_batch_dim; return; } /** Some requirements needed by custom cuda kernel function */ template <OperationType OpType_> std::string Encoder<OpType_>::check() { // if (_max_thread_per_block < _tw._hidden_size) { // return "violate hidden_size <= max_thread_per_block"; // } if (_tw._inner_size & 1) { return "violate inner_size % 2 = 0"; } if (_tw._dim_per_head & 1) { return "violate dim_per_head % 2 = 0"; } if (_tw._is_multilingual == false && _p_d_src_emb_wei.size() != 4) { return "violate p_d_src_emb_wei.size() = 4"; } if (_tw._is_multilingual && _p_d_src_emb_wei.size() != 5) { return "violate p_d_src_emb_wei.size() = 5"; } if (_p_d_enc_wei.size() != _tw._weight_per_enc_layer * _tw._n_enc_layer) { return "violate p_d_enc_wei.size() = weight_per_enc_layer * n_enc_layer"; } return ""; } /** Encoder inference */ template <OperationType OpType_> void Encoder<OpType_>::run_one_infer(int batch_size, int batch_seq_len) { /* ---step1. init--- */ _batch_size = batch_size; _batch_seq_len = batch_seq_len; _batch_token_num = batch_size * batch_seq_len; #ifdef DEBUG_RESULT std::cout << "batch_size-" << batch_size << " batch_seq_len-" << batch_seq_len << std::endl; print_vec(_p_d_token_id, "batch_token_ids", batch_size * batch_seq_len); #endif /* ---step2. 
encoder feedforward--- */ if (_tw._is_multilingual) { ker_multilg_enc_emb_launcher<_DataType>( batch_size, batch_seq_len, _tw._hidden_size, _stream, _p_d_src_emb_wei[0], _p_d_src_emb_wei[1], _p_d_src_emb_wei[4], //_p_d_src_emb_wei[1], _p_d_src_emb_wei[1], _p_d_token_id, _p_d_output, _p_d_padding_mask, _tw._padding_id, _max_thread_per_block); } else { ker_enc_embedding_launcher<_DataType>( batch_size, batch_seq_len, _tw._hidden_size, _stream, _p_d_src_emb_wei[0], _p_d_src_emb_wei[1], _p_d_token_id, _p_d_output, _p_d_padding_mask, _tw._padding_id, _max_thread_per_block); } #ifdef DEBUG_RESULT for (int i = 0; i < _batch_size; i++) { // batch_id for (int j = 0; j < _batch_seq_len; j++) { // token_id std::cout << "emb out: token-" << j << std::endl; print_vec(_p_d_output + i * _batch_seq_len * _tw._hidden_size + j * _tw._hidden_size, "emb out", 10); } } // not normal #endif for (_layer_id = 0; _layer_id < _tw._n_enc_layer; _layer_id++) { _weight_offset = _layer_id * _tw._weight_per_enc_layer; self_attention(); ffn_add_norm(); } // last layer norm ker_norm_layer_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_output, _p_d_src_emb_wei[2], _p_d_src_emb_wei[3], _max_thread_per_block); #ifdef DEBUG_RESULT for (int i = 0; i < _batch_size; i++) { // batch_id for (int j = 0; j < _batch_seq_len; j++) { // token_id std::cout << "encoder output: token-" << j << std::endl; print_vec(_p_d_output + i * _batch_seq_len * _tw._hidden_size + j * _tw._hidden_size, "encoder_output", _tw._dim_per_head); } } // not normal #endif return; } /** Encoder self attention */ template <OperationType OpType_> void Encoder<OpType_>::self_attention() { /* ---step 0. layer_norm, add output_bias to "query"--- */ ker_norm_layer_resual_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_output, _p_d_q, _p_d_enc_wei[_weight_offset], _p_d_enc_wei[_weight_offset + 1], _p_d_enc_wei[_weight_offset + 5], _max_thread_per_block, _tw._is_post_ln); /* ---step 1. qkv = ori_q * qkv_wei + bias, and reshape qkv for multi-head * gemm--- */ CHECK_GPU_ERROR(hipblasGemmEx( _hd, HIPBLAS_OP_N, HIPBLAS_OP_N, _tw._hidden_size * 3, _batch_token_num, _tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 2], _AType, _tw._hidden_size * 3, _p_d_q, _BType, _tw._hidden_size, &_fzero, _p_d_qkv_projected, _CType, _tw._hidden_size * 3, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // get q, k, v by split and reshape qkv ker_arrange_encself_qkv_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_qkv_projected, _p_d_enc_wei[_weight_offset + 3], _p_d_q, _max_batch_dim, _batch_seq_len, _tw._dim_per_head, _tw._head_num, _max_thread_per_block); /* ---step 2. correlation = q * k, perform softmax on correlation--- */ CHECK_GPU_ERROR(hipblasGemmStridedBatchedEx( _hd, HIPBLAS_OP_T, HIPBLAS_OP_N, _batch_seq_len, _batch_seq_len, _tw._dim_per_head, &_atten_scaler, _p_d_k, _AType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, _p_d_q, _BType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, &_fzero, _p_d_c, _CType, _batch_seq_len, _batch_seq_len * _batch_seq_len, _batch_size * _tw._head_num, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); ker_correlation_softmax_encself_launcher<_DataType>( _batch_size, _batch_seq_len, _tw._head_num, _stream, _p_d_c, _p_d_padding_mask); /* ---step 3. 
new_q = correlation * v--- */ CHECK_GPU_ERROR(hipblasGemmStridedBatchedEx( _hd, HIPBLAS_OP_N, HIPBLAS_OP_N, _tw._dim_per_head, _batch_seq_len, _batch_seq_len, &_fone, _p_d_v, _AType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, _p_d_c, _BType, _batch_seq_len, _batch_seq_len * _batch_seq_len, &_fzero, _p_d_q, _CType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, _batch_size * _tw._head_num, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // use v to save reshaped q, since they are in same size and v // will not be use again before the next multi-head-attention ker_arrange_atten_output_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_q, _p_d_v, _batch_seq_len, _tw._dim_per_head, _tw._head_num, _max_thread_per_block); /* ---step 4. new_q = ori_q + new_q * output_wei--- */ CHECK_GPU_ERROR(hipblasGemmEx( _hd, HIPBLAS_OP_N, HIPBLAS_OP_N, _tw._hidden_size, _batch_token_num, _tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 4], _AType, _tw._hidden_size, _p_d_v, _BType, _tw._hidden_size, &_fone, _p_d_output, _CType, _tw._hidden_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); return; } template <OperationType OpType_> void Encoder<OpType_>::ffn_add_norm() { /* ---step 0. layer_norm, add output_bias to "query"--- */ ker_norm_layer_resual_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_output, _p_d_ffn_buf1, _p_d_enc_wei[_weight_offset + 6], _p_d_enc_wei[_weight_offset + 7], _p_d_enc_wei[_weight_offset + 11], _max_thread_per_block, _tw._is_post_ln); /* ---step 1. first ffn layer--- */ CHECK_GPU_ERROR(hipblasGemmEx( _hd, HIPBLAS_OP_N, HIPBLAS_OP_N, _tw._inner_size, _batch_token_num, _tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 8], _AType, _tw._inner_size, _p_d_ffn_buf1, _BType, _tw._hidden_size, &_fzero, _p_d_ffn_buf2, _CType, _tw._inner_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); if (_tw._use_gelu) { ker_bias_gelu_launcher<_DataType>( _batch_token_num, _max_thread_per_block, _stream, _p_d_ffn_buf2, _p_d_enc_wei[_weight_offset + 9], _tw._inner_size); } else { ker_bias_relu_launcher<_DataType>( _batch_token_num, _max_thread_per_block, _stream, _p_d_ffn_buf2, _p_d_enc_wei[_weight_offset + 9], _tw._inner_size); } /* ---step 2. second ffn layer--- */ CHECK_GPU_ERROR(hipblasGemmEx( _hd, HIPBLAS_OP_N, HIPBLAS_OP_N, _tw._hidden_size, _batch_token_num, _tw._inner_size, &_fone, _p_d_enc_wei[_weight_offset + 10], _AType, _tw._hidden_size, _p_d_ffn_buf2, _BType, _tw._inner_size, &_fone, _p_d_output, _CType, _tw._hidden_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); return; } template class Encoder<OperationType::FP16>; template class Encoder<OperationType::FP32>; } // namespace cuda } // namespace lightseq
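/* Step 2 of self_attention above computes correlation = q * k^T scaled by
   1/sqrt(dim_per_head), one strided-batched GEMM covering every
   (batch, head) slice. As a shape reference only, here is a naive kernel
   computing the same pre-softmax score for a single slice, assuming
   row-major [batch_seq_len, dim_per_head] q/k buffers; it is a sketch, not
   the GEMM the encoder actually issues. One artifact worth noting: hipify
   renamed cublasGemmEx/cublasGemmStridedBatchedEx to hipblas*, but left the
   CUBLAS_GEMM_DEFAULT_TENSOR_OP algorithm enum untouched, which a HIP build
   would presumably need to map to hipBLAS's default-algorithm constant. */
__global__ void attnScoreRef(const float *q, const float *k, float *c,
                             int seq_len, int dim_per_head) {
  int row = blockIdx.y * blockDim.y + threadIdx.y;  // query position
  int col = blockIdx.x * blockDim.x + threadIdx.x;  // key position
  if (row >= seq_len || col >= seq_len) return;
  float acc = 0.f;
  for (int d = 0; d < dim_per_head; ++d)
    acc += q[row * dim_per_head + d] * k[col * dim_per_head + d];
  c[row * seq_len + col] = acc * rsqrtf((float)dim_per_head);
}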
c011397a0b73809d3bc38a3c3e5d0f55d0c4a76e.cu
#include "encoder.h" #include "kernels/transformerKernels.h" #include "kernels/multilgKernels.h" /** @file Transformer encoder, composed by gemm lib and custom cuda kernel function */ namespace lightseq { namespace cuda { template <OperationType OpType_> Encoder<OpType_>::Encoder(int max_batch_size, const int *p_d_token_id, int *p_d_padding_mask, _DataType *p_d_output, const TransformerWeight<OpType_> &tw, cudaStream_t stream, cublasHandle_t hd) : _max_batch_size(max_batch_size), _p_d_token_id(p_d_token_id), _p_d_padding_mask(p_d_padding_mask), _p_d_output(p_d_output), _tw(tw), _stream(stream), _hd(hd), _p_d_src_emb_wei(tw.get_src_emb_wei()), _p_d_enc_wei(tw.get_enc_wei()), _fone((_DataType)1.f), _fzero((_DataType)0.f), _atten_scaler((_DataType)sqrt(1.f / tw._dim_per_head)), _max_batch_dim(max_batch_size * tw._max_step * tw._hidden_size), _max_thread_per_block(1024) {} /** Compute GPU memory size needed by transformer encoder, to see how these memory is used, checkout init_buffer() for detail */ template <OperationType OpType_> long Encoder<OpType_>::compute_buffer_bytesize() { long sz1 = _max_batch_dim * 6 + _max_batch_size * _tw._head_num * _tw._max_step * _tw._max_step; long sz2 = _max_batch_dim + _max_batch_size * _tw._max_step * _tw._inner_size; return max(sz1, sz2) * sizeof(_DataType); } /** Init the GPU memory pointer which point to the memory buffer needed by encoder. These buffer are used during custom cuda kernel function, find the corresponding function to see how these buffer are used */ template <OperationType OpType_> void Encoder<OpType_>::init_buffer(void *pbuf) { _DataType *p_d_buf = reinterpret_cast<_DataType *>(pbuf); _p_d_qkv_projected = p_d_buf; _p_d_q = _p_d_qkv_projected + _max_batch_dim * 3; _p_d_k = _p_d_q + _max_batch_dim; _p_d_v = _p_d_k + _max_batch_dim; _p_d_c = _p_d_v + _max_batch_dim; _p_d_ffn_buf1 = p_d_buf; _p_d_ffn_buf2 = _p_d_ffn_buf1 + _max_batch_dim; return; } /** Some requirements needed by custom cuda kernel function */ template <OperationType OpType_> std::string Encoder<OpType_>::check() { // if (_max_thread_per_block < _tw._hidden_size) { // return "violate hidden_size <= max_thread_per_block"; // } if (_tw._inner_size & 1) { return "violate inner_size % 2 = 0"; } if (_tw._dim_per_head & 1) { return "violate dim_per_head % 2 = 0"; } if (_tw._is_multilingual == false && _p_d_src_emb_wei.size() != 4) { return "violate p_d_src_emb_wei.size() = 4"; } if (_tw._is_multilingual && _p_d_src_emb_wei.size() != 5) { return "violate p_d_src_emb_wei.size() = 5"; } if (_p_d_enc_wei.size() != _tw._weight_per_enc_layer * _tw._n_enc_layer) { return "violate p_d_enc_wei.size() = weight_per_enc_layer * n_enc_layer"; } return ""; } /** Encoder inference */ template <OperationType OpType_> void Encoder<OpType_>::run_one_infer(int batch_size, int batch_seq_len) { /* ---step1. init--- */ _batch_size = batch_size; _batch_seq_len = batch_seq_len; _batch_token_num = batch_size * batch_seq_len; #ifdef DEBUG_RESULT std::cout << "batch_size-" << batch_size << " batch_seq_len-" << batch_seq_len << std::endl; print_vec(_p_d_token_id, "batch_token_ids", batch_size * batch_seq_len); #endif /* ---step2. 
encoder feedforward--- */ if (_tw._is_multilingual) { ker_multilg_enc_emb_launcher<_DataType>( batch_size, batch_seq_len, _tw._hidden_size, _stream, _p_d_src_emb_wei[0], _p_d_src_emb_wei[1], _p_d_src_emb_wei[4], //_p_d_src_emb_wei[1], _p_d_src_emb_wei[1], _p_d_token_id, _p_d_output, _p_d_padding_mask, _tw._padding_id, _max_thread_per_block); } else { ker_enc_embedding_launcher<_DataType>( batch_size, batch_seq_len, _tw._hidden_size, _stream, _p_d_src_emb_wei[0], _p_d_src_emb_wei[1], _p_d_token_id, _p_d_output, _p_d_padding_mask, _tw._padding_id, _max_thread_per_block); } #ifdef DEBUG_RESULT for (int i = 0; i < _batch_size; i++) { // batch_id for (int j = 0; j < _batch_seq_len; j++) { // token_id std::cout << "emb out: token-" << j << std::endl; print_vec(_p_d_output + i * _batch_seq_len * _tw._hidden_size + j * _tw._hidden_size, "emb out", 10); } } // not normal #endif for (_layer_id = 0; _layer_id < _tw._n_enc_layer; _layer_id++) { _weight_offset = _layer_id * _tw._weight_per_enc_layer; self_attention(); ffn_add_norm(); } // last layer norm ker_norm_layer_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_output, _p_d_src_emb_wei[2], _p_d_src_emb_wei[3], _max_thread_per_block); #ifdef DEBUG_RESULT for (int i = 0; i < _batch_size; i++) { // batch_id for (int j = 0; j < _batch_seq_len; j++) { // token_id std::cout << "encoder output: token-" << j << std::endl; print_vec(_p_d_output + i * _batch_seq_len * _tw._hidden_size + j * _tw._hidden_size, "encoder_output", _tw._dim_per_head); } } // not normal #endif return; } /** Encoder self attention */ template <OperationType OpType_> void Encoder<OpType_>::self_attention() { /* ---step 0. layer_norm, add output_bias to "query"--- */ ker_norm_layer_resual_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_output, _p_d_q, _p_d_enc_wei[_weight_offset], _p_d_enc_wei[_weight_offset + 1], _p_d_enc_wei[_weight_offset + 5], _max_thread_per_block, _tw._is_post_ln); /* ---step 1. qkv = ori_q * qkv_wei + bias, and reshape qkv for multi-head * gemm--- */ CHECK_GPU_ERROR(cublasGemmEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._hidden_size * 3, _batch_token_num, _tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 2], _AType, _tw._hidden_size * 3, _p_d_q, _BType, _tw._hidden_size, &_fzero, _p_d_qkv_projected, _CType, _tw._hidden_size * 3, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // get q, k, v by split and reshape qkv ker_arrange_encself_qkv_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_qkv_projected, _p_d_enc_wei[_weight_offset + 3], _p_d_q, _max_batch_dim, _batch_seq_len, _tw._dim_per_head, _tw._head_num, _max_thread_per_block); /* ---step 2. correlation = q * k, perform softmax on correlation--- */ CHECK_GPU_ERROR(cublasGemmStridedBatchedEx( _hd, CUBLAS_OP_T, CUBLAS_OP_N, _batch_seq_len, _batch_seq_len, _tw._dim_per_head, &_atten_scaler, _p_d_k, _AType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, _p_d_q, _BType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, &_fzero, _p_d_c, _CType, _batch_seq_len, _batch_seq_len * _batch_seq_len, _batch_size * _tw._head_num, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); ker_correlation_softmax_encself_launcher<_DataType>( _batch_size, _batch_seq_len, _tw._head_num, _stream, _p_d_c, _p_d_padding_mask); /* ---step 3. 
new_q = correlation * v--- */ CHECK_GPU_ERROR(cublasGemmStridedBatchedEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._dim_per_head, _batch_seq_len, _batch_seq_len, &_fone, _p_d_v, _AType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, _p_d_c, _BType, _batch_seq_len, _batch_seq_len * _batch_seq_len, &_fzero, _p_d_q, _CType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, _batch_size * _tw._head_num, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // use v to save reshaped q, since they are in same size and v // will not be use again before the next multi-head-attention ker_arrange_atten_output_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_q, _p_d_v, _batch_seq_len, _tw._dim_per_head, _tw._head_num, _max_thread_per_block); /* ---step 4. new_q = ori_q + new_q * output_wei--- */ CHECK_GPU_ERROR(cublasGemmEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._hidden_size, _batch_token_num, _tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 4], _AType, _tw._hidden_size, _p_d_v, _BType, _tw._hidden_size, &_fone, _p_d_output, _CType, _tw._hidden_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); return; } template <OperationType OpType_> void Encoder<OpType_>::ffn_add_norm() { /* ---step 0. layer_norm, add output_bias to "query"--- */ ker_norm_layer_resual_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_output, _p_d_ffn_buf1, _p_d_enc_wei[_weight_offset + 6], _p_d_enc_wei[_weight_offset + 7], _p_d_enc_wei[_weight_offset + 11], _max_thread_per_block, _tw._is_post_ln); /* ---step 1. first ffn layer--- */ CHECK_GPU_ERROR(cublasGemmEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._inner_size, _batch_token_num, _tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 8], _AType, _tw._inner_size, _p_d_ffn_buf1, _BType, _tw._hidden_size, &_fzero, _p_d_ffn_buf2, _CType, _tw._inner_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); if (_tw._use_gelu) { ker_bias_gelu_launcher<_DataType>( _batch_token_num, _max_thread_per_block, _stream, _p_d_ffn_buf2, _p_d_enc_wei[_weight_offset + 9], _tw._inner_size); } else { ker_bias_relu_launcher<_DataType>( _batch_token_num, _max_thread_per_block, _stream, _p_d_ffn_buf2, _p_d_enc_wei[_weight_offset + 9], _tw._inner_size); } /* ---step 2. second ffn layer--- */ CHECK_GPU_ERROR(cublasGemmEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._hidden_size, _batch_token_num, _tw._inner_size, &_fone, _p_d_enc_wei[_weight_offset + 10], _AType, _tw._hidden_size, _p_d_ffn_buf2, _BType, _tw._inner_size, &_fone, _p_d_output, _CType, _tw._hidden_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); return; } template class Encoder<OperationType::FP16>; template class Encoder<OperationType::FP32>; } // namespace cuda } // namespace lightseq
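/* ker_bias_relu_launcher / ker_bias_gelu_launcher used by ffn_add_norm come
   from kernels/transformerKernels.h and are not shown in this pair. A
   minimal sketch of the fused bias+activation such a launcher presumably
   wraps, assuming a row-major [batch_token_num, inner_size] buffer (the
   name biasReluSketch is illustrative, not the library's): */
__global__ void biasReluSketch(float *x, const float *bias, int total,
                               int inner_size) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= total) return;
  float v = x[i] + bias[i % inner_size];  // broadcast bias across rows
  x[i] = v > 0.f ? v : 0.f;               // ReLU
}
// Possible launch, one thread per element:
//   int total = batch_token_num * inner_size;
//   biasReluSketch<<<(total + 255) / 256, 256>>>(buf, bias, total, inner_size);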
3f7c97d40d62c55701be1a044e6cb8bc634b0858.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
*
* ANIMATION PROCESSING
*
*******************************************************************************/

#include "animate.h"
#include <stdio.h>

/******************************************************************************/
__global__ void drawColor(unsigned char* optr, const float* red,
                          const float* green, const float* blue,
                          int width, int height) {
  int x = threadIdx.x + blockIdx.x * blockDim.x;
  int y = threadIdx.y + blockIdx.y * blockDim.y;

  // the grid below is rounded up, so edge blocks may reach past the image;
  // skip those threads instead of indexing out of bounds
  if (x >= width || y >= height) return;

  int offset = x + y * width;  // pitch is the image width

  float theRed = red[offset];
  if (theRed < 0) theRed = 0;
  if (theRed > 1) theRed = 1;
  float theGreen = green[offset];
  if (theGreen < 0) theGreen = 0;
  if (theGreen > 1) theGreen = 1;
  float theBlue = blue[offset];
  if (theBlue < 0) theBlue = 0;
  if (theBlue > 1) theBlue = 1;

  // convert RGB values from 0-1 to 0-255
  optr[offset * 4 + 0] = 255 * theRed;    // red
  optr[offset * 4 + 1] = 255 * theGreen;  // green
  optr[offset * 4 + 2] = 255 * theBlue;   // blue
  optr[offset * 4 + 3] = 255;             // alpha (opacity)
}

/******************************************************************************/
void CPUAnimBitmap::drawPalette(void) {
  dim3 threads(32, 32);
  // round up: ceil(width/32) truncated because width/32 is integer division,
  // which left the right/bottom edges undrawn for sizes not divisible by 32
  dim3 blocks((width + 31) / 32, (height + 31) / 32);

  hipLaunchKernelGGL(drawColor, blocks, threads, 0, 0, dev_bitmap,
                     thePalette->red, thePalette->green, thePalette->blue,
                     width, height);

  hipMemcpy(get_ptr(), dev_bitmap, image_size(), hipMemcpyDeviceToHost);

  glutMainLoopEvent();
  glutPostRedisplay();
}

/******************************************************************************/
CPUAnimBitmap::CPUAnimBitmap(GPU_Palette* P1) {
  width = P1->palette_width;
  height = P1->palette_height;
  pixels = new unsigned char[width * height * 4];
  thePalette = P1;
}

/******************************************************************************/
CPUAnimBitmap::~CPUAnimBitmap() {
  delete[] pixels;
}

/******************************************************************************/
CPUAnimBitmap** CPUAnimBitmap::get_bitmap_ptr(void) {
  static CPUAnimBitmap* gBitmap;
  return &gBitmap;
}

/******************************************************************************/
void CPUAnimBitmap::Draw(void) {
  CPUAnimBitmap* bitmap = *(get_bitmap_ptr());
  glClearColor(0.0, 0.0, 0.0, 1.0);
  glClear(GL_COLOR_BUFFER_BIT);
  glDrawPixels(bitmap->width, bitmap->height, GL_RGBA, GL_UNSIGNED_BYTE,
               bitmap->pixels);
  glutSwapBuffers();
}

/******************************************************************************/
void CPUAnimBitmap::initAnimation() {
  CPUAnimBitmap** bitmap = get_bitmap_ptr();
  *bitmap = this;
  int c = 1;
  char* dummy = "";
  glutInit(&c, &dummy);
  glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
  glutInitWindowSize(width, height);
  glutCreateWindow("MyWindow");
  glutDisplayFunc(Draw);
}

/******************************************************************************/
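/* The per-channel clamps in drawColor can also be written with CUDA's
   __saturatef intrinsic, which clamps a float to [0, 1]; HIP mirrors most
   single-precision intrinsics, so the hipified file can typically use the
   same call. An equivalent helper (a sketch, not part of the file above): */
__device__ unsigned char channelToByte(float v) {
  return (unsigned char)(255.0f * __saturatef(v));  // clamp, then scale
}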
3f7c97d40d62c55701be1a044e6cb8bc634b0858.cu
/*******************************************************************************
*
* ANIMATION PROCESSING
*
*******************************************************************************/

#include "animate.h"
#include <stdio.h>

/******************************************************************************/
__global__ void drawColor(unsigned char* optr, const float* red,
                          const float* green, const float* blue,
                          int width, int height) {
  int x = threadIdx.x + blockIdx.x * blockDim.x;
  int y = threadIdx.y + blockIdx.y * blockDim.y;

  // the grid below is rounded up, so edge blocks may reach past the image;
  // skip those threads instead of indexing out of bounds
  if (x >= width || y >= height) return;

  int offset = x + y * width;  // pitch is the image width

  float theRed = red[offset];
  if (theRed < 0) theRed = 0;
  if (theRed > 1) theRed = 1;
  float theGreen = green[offset];
  if (theGreen < 0) theGreen = 0;
  if (theGreen > 1) theGreen = 1;
  float theBlue = blue[offset];
  if (theBlue < 0) theBlue = 0;
  if (theBlue > 1) theBlue = 1;

  // convert RGB values from 0-1 to 0-255
  optr[offset * 4 + 0] = 255 * theRed;    // red
  optr[offset * 4 + 1] = 255 * theGreen;  // green
  optr[offset * 4 + 2] = 255 * theBlue;   // blue
  optr[offset * 4 + 3] = 255;             // alpha (opacity)
}

/******************************************************************************/
void CPUAnimBitmap::drawPalette(void) {
  dim3 threads(32, 32);
  // round up: ceil(width/32) truncated because width/32 is integer division,
  // which left the right/bottom edges undrawn for sizes not divisible by 32
  dim3 blocks((width + 31) / 32, (height + 31) / 32);

  drawColor <<< blocks, threads >>> (dev_bitmap, thePalette->red,
                                     thePalette->green, thePalette->blue,
                                     width, height);

  cudaMemcpy(get_ptr(), dev_bitmap, image_size(), cudaMemcpyDeviceToHost);

  glutMainLoopEvent();
  glutPostRedisplay();
}

/******************************************************************************/
CPUAnimBitmap::CPUAnimBitmap(GPU_Palette* P1) {
  width = P1->palette_width;
  height = P1->palette_height;
  pixels = new unsigned char[width * height * 4];
  thePalette = P1;
}

/******************************************************************************/
CPUAnimBitmap::~CPUAnimBitmap() {
  delete[] pixels;
}

/******************************************************************************/
CPUAnimBitmap** CPUAnimBitmap::get_bitmap_ptr(void) {
  static CPUAnimBitmap* gBitmap;
  return &gBitmap;
}

/******************************************************************************/
void CPUAnimBitmap::Draw(void) {
  CPUAnimBitmap* bitmap = *(get_bitmap_ptr());
  glClearColor(0.0, 0.0, 0.0, 1.0);
  glClear(GL_COLOR_BUFFER_BIT);
  glDrawPixels(bitmap->width, bitmap->height, GL_RGBA, GL_UNSIGNED_BYTE,
               bitmap->pixels);
  glutSwapBuffers();
}

/******************************************************************************/
void CPUAnimBitmap::initAnimation() {
  CPUAnimBitmap** bitmap = get_bitmap_ptr();
  *bitmap = this;
  int c = 1;
  char* dummy = "";
  glutInit(&c, &dummy);
  glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
  glutInitWindowSize(width, height);
  glutCreateWindow("MyWindow");
  glutDisplayFunc(Draw);
}

/******************************************************************************/
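/* Aside from API renames, the visible difference between this pair is the
   launch syntax: hipify rewrites the triple-chevron launch into
   hipLaunchKernelGGL, making the defaulted shared-memory size and stream
   arguments explicit. Side by side, for the launch above:

   CUDA: drawColor<<<blocks, threads>>>(dev_bitmap, red, green, blue, ...);
   CUDA, with the two defaulted arguments written out:
         drawColor<<<blocks, threads, 0, 0>>>(dev_bitmap, red, green, blue, ...);
   HIP:  hipLaunchKernelGGL(drawColor, blocks, threads, 0, 0,
                            dev_bitmap, red, green, blue, ...);
*/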
08b4d33bceb734c1ac7763a63967e2f2de50c804.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h> // time() is used to seed rand() in main()
#include <unistd.h>

#define CBLACK "\33[30m"
#define CRED "\33[31m"
#define CGREEN "\33[32m"
#define CWHITE "\33[37m"

#define SIT_SIZE 500
#define NBR_COIN 162
#define NBR_COIN_CUDA 162
#define NBR_BLOCK 1024
#define NBR_HIGH_SCORE 50
#define MIN_PRICE 0.000620
#define TIME_GUESS 100
#define COIN_TEST 98
#define AMOUNT_BET 100
#define MIN_POURCENT_GUESS 0.001
#define NBR_MINUTES 881003
#define AMOUNT_TEST 881003

typedef struct {
  double open;
  double high;
  double low;
  double close;
  double volume;
} Data;

typedef struct {
  double time;
  Data data[NBR_COIN];
} Minute;

typedef struct {
  int score;
  int minuteId;
  int coinId;
} Score;

typedef struct {
  Score highScores[NBR_HIGH_SCORE];
  double *guessed;
  /**Cuda memory */
  Minute **minutes; // all history
  Minute **srcPourcent;
  int *scores;
} Env;

typedef struct {
  int cursor;
  int coinId;
} Situation;

Env env;

/**
 * Clear visual field
 */
void clear() { dprintf(1, "#CLS\n"); }

/**
 * Launch the great machine comparator
 * Comparing pourcent source with all other minutes
 */
__global__ void bake(Minute **source, int sourceCoinId, int cursor,
                     Minute **minutes, int *scores) {
  int coinId = threadIdx.x;
  int minuteId = blockIdx.x;
  double score = 0;
  if (minutes[cursor + minuteId]->data[coinId].open < MIN_PRICE) {
    scores[NBR_COIN_CUDA * minuteId + coinId] = -1;
    return;
  }
  for (int i = 0; i < SIT_SIZE; i++) {
    if (minutes[cursor + minuteId + i]->data[coinId].open == -1) {
      scores[NBR_COIN_CUDA * minuteId + coinId] = -1;
      return;
    }
    double pourcent = minutes[cursor + minuteId + i]->data[coinId].open /
                      minutes[cursor + minuteId]->data[coinId].open * 100;
    score += fabs(fabs(source[i]->data[sourceCoinId].open) - fabs(pourcent));
  }
  scores[NBR_COIN_CUDA * minuteId + coinId] = score;
}

/**
 * Generate a random number
 */
int random_number(int min_num, int max_num) {
  int result = (rand() % (max_num - min_num)) + min_num;
  return result;
}

/**
 * Load history in RAM and VRAM
 */
Minute **loadHistory(int start, int amount) {
  int fd = open("../data/bin/full", O_RDONLY);
  Minute **minutes;
  hipMallocManaged(&minutes, sizeof(void *) * amount);
  int i = -1;
  while (1) {
    i++;
    hipMallocManaged(&minutes[i], sizeof(Minute));
    // stop once the last slot of the 'amount'-entry pointer array is filled
    if (read(fd, minutes[i], sizeof(Minute)) < 1 || i == amount - 1)
      break;
  }
  return minutes;
}

/**
 * Transform every value of a situation to a pourcentage from first value
 */
Minute **SituationToPourcent(int cursor) {
  for (int i = 0; i < SIT_SIZE; i++) {
    env.srcPourcent[i]->time = env.minutes[cursor + i]->time;
    for (int coinIndex = 0; coinIndex < NBR_COIN_CUDA; coinIndex++) {
      env.srcPourcent[i]->data[coinIndex].close =
          env.minutes[cursor + i]->data[coinIndex].close /
          env.minutes[cursor]->data[coinIndex].close * 100;
      env.srcPourcent[i]->data[coinIndex].high =
          env.minutes[cursor + i]->data[coinIndex].high /
          env.minutes[cursor]->data[coinIndex].high * 100;
      env.srcPourcent[i]->data[coinIndex].low =
          env.minutes[cursor + i]->data[coinIndex].low /
          env.minutes[cursor]->data[coinIndex].low * 100;
      env.srcPourcent[i]->data[coinIndex].open =
          env.minutes[cursor + i]->data[coinIndex].open /
          env.minutes[cursor]->data[coinIndex].open * 100;
      env.srcPourcent[i]->data[coinIndex].volume =
          env.minutes[cursor + i]->data[coinIndex].volume /
          env.minutes[cursor + i]->data[coinIndex].volume * 100;
    }
  }
  return env.srcPourcent;
}

/**
 * Export situation to external program
 */
void printSituation(int cursor, int coinId) {
  dprintf(2, "sit : %lf coinId : %d\n", env.minutes[cursor]->time, coinId);
  dprintf(1, "#SIT");
  for (int i = 0; i < SIT_SIZE; i++) {
    dprintf(2, " %lf", env.minutes[i + cursor]->data[coinId].open);
    dprintf(1, " %lf", env.minutes[i + cursor]->data[coinId].open);
  }
  dprintf(1, "\n");
}

/**
 * Compare Given situation with all history
 */
void bakeSituation(int cursor, int baseCoinId) {
  // score
  int *scores = env.scores;
  int baseCursor = cursor;
  Minute **pourcent = SituationToPourcent(cursor);
  cursor = 0;
  for (int hi = 0; hi < NBR_HIGH_SCORE; hi++) {
    env.highScores[hi].score = 99999999;
    env.highScores[hi].minuteId = 0;
    env.highScores[hi].coinId = 0;
  }
  for (int bakeIndex = 0; cursor < 870000; bakeIndex++) {
    hipLaunchKernelGGL(( bake), dim3(NBR_BLOCK), dim3(NBR_COIN_CUDA), 0, 0, pourcent, baseCoinId, cursor, env.minutes, scores);
    hipDeviceSynchronize();
    hipError_t error = hipGetLastError();
    if (error != hipSuccess) {
      printf("CUDA error: %s\n", hipGetErrorString(error));
      exit(-1);
    }
    for (int i = 0; i < NBR_BLOCK * NBR_COIN_CUDA; i++) {
      if (scores[i] != -1) {
        int minuteId = i / NBR_COIN;
        int coinId = i % NBR_COIN;
        if (abs((minuteId + cursor) - baseCursor) < (SIT_SIZE * 5)) {
          continue;
        }
        for (int highIndex = 0; highIndex < NBR_HIGH_SCORE; highIndex++) {
          if (scores[i] < env.highScores[highIndex].score) {
            env.highScores[highIndex].score = scores[i];
            env.highScores[highIndex].minuteId = minuteId + cursor;
            env.highScores[highIndex].coinId = coinId;
            i += NBR_COIN_CUDA * 50;
            break;
          }
        }
      }
    }
    cursor += NBR_BLOCK;
  }
}

/**
 * Return the guessed percentage of change from situation to TIME_GUESS
 */
double makeNextGuess() {
  double pred = 0;
  for (int highIndex = 0; highIndex < NBR_HIGH_SCORE; highIndex++) {
    double start = env.minutes[env.highScores[highIndex].minuteId + SIT_SIZE]
                       ->data[env.highScores[highIndex].coinId]
                       .open;
    double end =
        env.minutes[env.highScores[highIndex].minuteId + SIT_SIZE + TIME_GUESS]
            ->data[env.highScores[highIndex].coinId]
            .open;
    pred += 100 - (start / end * 100);
  }
  pred = pred / NBR_HIGH_SCORE;
  return pred;
}

/**
 * Get real next pourcent of given situation
 */
double getRealNext(int minuteId, int coinId) {
  double start = env.minutes[minuteId + SIT_SIZE]->data[coinId].open;
  double end = env.minutes[minuteId + SIT_SIZE + TIME_GUESS]->data[coinId].open;
  return 100 - (start / end * 100);
}

void initMem() {
  hipMallocManaged(&env.srcPourcent, sizeof(void *) * SIT_SIZE);
  for (int i = 0; i < SIT_SIZE; i++) {
    hipMallocManaged(&env.srcPourcent[i], sizeof(Minute));
  }
  hipMallocManaged(&env.scores, sizeof(int) * NBR_BLOCK * NBR_COIN);
  env.guessed = (double *)malloc(sizeof(double) * SIT_SIZE);
}

Situation getRandomSituation() {
  Situation res;
  int last = 0;
  while (1) {
    res.cursor = random_number(200000, NBR_MINUTES - 1000);
    last = res.cursor;
    res.coinId = random_number(0, NBR_COIN_CUDA);
    if (env.minutes[res.cursor]->data[res.coinId].open != -1 &&
        env.minutes[res.cursor]->data[res.coinId].open > MIN_PRICE) {
      return res;
    }
    usleep(1000);
  }
}

void printInfos(Situation sit) {
  FILE *fp;
  fp = fopen("tmp", "w");
  fprintf(fp, "%d;%d(", sit.coinId, sit.cursor);
  for (int i = 20; i < 220; i += 20) {
    double start = env.minutes[sit.cursor + SIT_SIZE]->data[sit.coinId].open;
    double end = env.minutes[sit.cursor + SIT_SIZE + i]->data[sit.coinId].open;
    double pred = 100 - (start / end * 100);
    fprintf(fp, "%lf;", pred);
  }
  fprintf(fp, ")-->");
  for (int highIndex = 0; highIndex < NBR_HIGH_SCORE; highIndex++) {
    fprintf(fp, "%d;%d(", env.highScores[highIndex].coinId,
            env.highScores[highIndex].minuteId);
    for (int i = 20; i < 220; i += 20) {
      double start = env.minutes[env.highScores[highIndex].minuteId + SIT_SIZE]
                         ->data[env.highScores[highIndex].coinId]
                         .open;
      double end =
          env.minutes[env.highScores[highIndex].minuteId + SIT_SIZE + i]
              ->data[env.highScores[highIndex].coinId]
              .open;
      double pred = 100 - (start / end * 100);
      fprintf(fp, "%lf;", pred);
    }
    fprintf(fp, ")|");
  }
  fprintf(fp, "\n");
  fclose(fp);
}

int main() {
  srand(time(NULL));
  env.minutes = loadHistory(0, AMOUNT_TEST);
  initMem();
  Situation sit = {0, 0}; // mirrors the bakeSituation(0, 0) call below
  Data *tmp = (Data *)malloc(sizeof(Data) * 500);
  double last = -1;
  while (1) {
    int fd = open("./actual", O_RDONLY);
    if (fd < 1) {
      printf("CUDA SLEEP 1\n");
      sleep(1);
      continue;
    }
    int res = read(fd, tmp, sizeof(Data) * 500);
    close(fd); // the file is re-opened on every pass
    if (last == tmp[0].open) {
      printf("CUDA SLEEP 2\n");
      sleep(1);
      continue;
    }
    printf("PROCESSING !\n");
    for (int i = 0; i < 500; i++) {
      env.minutes[i]->data[0].open = tmp[i].open;
    }
    bakeSituation(0, 0);
    printInfos(sit);
    last = tmp[0].open;
    sleep(1);
    // break;
    // Situation sit = getRandomSituation();
    // bakeSituation(sit.cursor, sit.coinId);
  }
  return 0;
}
08b4d33bceb734c1ac7763a63967e2f2de50c804.cu
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h> // time() is used to seed rand() in main()
#include <unistd.h>

#define CBLACK "\33[30m"
#define CRED "\33[31m"
#define CGREEN "\33[32m"
#define CWHITE "\33[37m"

#define SIT_SIZE 500
#define NBR_COIN 162
#define NBR_COIN_CUDA 162
#define NBR_BLOCK 1024
#define NBR_HIGH_SCORE 50
#define MIN_PRICE 0.000620
#define TIME_GUESS 100
#define COIN_TEST 98
#define AMOUNT_BET 100
#define MIN_POURCENT_GUESS 0.001
#define NBR_MINUTES 881003
#define AMOUNT_TEST 881003

typedef struct {
  double open;
  double high;
  double low;
  double close;
  double volume;
} Data;

typedef struct {
  double time;
  Data data[NBR_COIN];
} Minute;

typedef struct {
  int score;
  int minuteId;
  int coinId;
} Score;

typedef struct {
  Score highScores[NBR_HIGH_SCORE];
  double *guessed;
  /**Cuda memory */
  Minute **minutes; // all history
  Minute **srcPourcent;
  int *scores;
} Env;

typedef struct {
  int cursor;
  int coinId;
} Situation;

Env env;

/**
 * Clear visual field
 */
void clear() { dprintf(1, "#CLS\n"); }

/**
 * Launch the great machine comparator
 * Comparing pourcent source with all other minutes
 */
__global__ void bake(Minute **source, int sourceCoinId, int cursor,
                     Minute **minutes, int *scores) {
  int coinId = threadIdx.x;
  int minuteId = blockIdx.x;
  double score = 0;
  if (minutes[cursor + minuteId]->data[coinId].open < MIN_PRICE) {
    scores[NBR_COIN_CUDA * minuteId + coinId] = -1;
    return;
  }
  for (int i = 0; i < SIT_SIZE; i++) {
    if (minutes[cursor + minuteId + i]->data[coinId].open == -1) {
      scores[NBR_COIN_CUDA * minuteId + coinId] = -1;
      return;
    }
    double pourcent = minutes[cursor + minuteId + i]->data[coinId].open /
                      minutes[cursor + minuteId]->data[coinId].open * 100;
    score += fabs(fabs(source[i]->data[sourceCoinId].open) - fabs(pourcent));
  }
  scores[NBR_COIN_CUDA * minuteId + coinId] = score;
}

/**
 * Generate a random number
 */
int random_number(int min_num, int max_num) {
  int result = (rand() % (max_num - min_num)) + min_num;
  return result;
}

/**
 * Load history in RAM and VRAM
 */
Minute **loadHistory(int start, int amount) {
  int fd = open("../data/bin/full", O_RDONLY);
  Minute **minutes;
  cudaMallocManaged(&minutes, sizeof(void *) * amount);
  int i = -1;
  while (1) {
    i++;
    cudaMallocManaged(&minutes[i], sizeof(Minute));
    // stop once the last slot of the 'amount'-entry pointer array is filled
    if (read(fd, minutes[i], sizeof(Minute)) < 1 || i == amount - 1)
      break;
  }
  return minutes;
}

/**
 * Transform every value of a situation to a pourcentage from first value
 */
Minute **SituationToPourcent(int cursor) {
  for (int i = 0; i < SIT_SIZE; i++) {
    env.srcPourcent[i]->time = env.minutes[cursor + i]->time;
    for (int coinIndex = 0; coinIndex < NBR_COIN_CUDA; coinIndex++) {
      env.srcPourcent[i]->data[coinIndex].close =
          env.minutes[cursor + i]->data[coinIndex].close /
          env.minutes[cursor]->data[coinIndex].close * 100;
      env.srcPourcent[i]->data[coinIndex].high =
          env.minutes[cursor + i]->data[coinIndex].high /
          env.minutes[cursor]->data[coinIndex].high * 100;
      env.srcPourcent[i]->data[coinIndex].low =
          env.minutes[cursor + i]->data[coinIndex].low /
          env.minutes[cursor]->data[coinIndex].low * 100;
      env.srcPourcent[i]->data[coinIndex].open =
          env.minutes[cursor + i]->data[coinIndex].open /
          env.minutes[cursor]->data[coinIndex].open * 100;
      env.srcPourcent[i]->data[coinIndex].volume =
          env.minutes[cursor + i]->data[coinIndex].volume /
          env.minutes[cursor + i]->data[coinIndex].volume * 100;
    }
  }
  return env.srcPourcent;
}

/**
 * Export situation to external program
 */
void printSituation(int cursor, int coinId) {
  dprintf(2, "sit : %lf coinId : %d\n", env.minutes[cursor]->time, coinId);
  dprintf(1, "#SIT");
  for (int i = 0; i < SIT_SIZE; i++) {
    dprintf(2, " %lf", env.minutes[i + cursor]->data[coinId].open);
    dprintf(1, " %lf", env.minutes[i + cursor]->data[coinId].open);
  }
  dprintf(1, "\n");
}

/**
 * Compare Given situation with all history
 */
void bakeSituation(int cursor, int baseCoinId) {
  // score
  int *scores = env.scores;
  int baseCursor = cursor;
  Minute **pourcent = SituationToPourcent(cursor);
  cursor = 0;
  for (int hi = 0; hi < NBR_HIGH_SCORE; hi++) {
    env.highScores[hi].score = 99999999;
    env.highScores[hi].minuteId = 0;
    env.highScores[hi].coinId = 0;
  }
  for (int bakeIndex = 0; cursor < 870000; bakeIndex++) {
    bake<<<NBR_BLOCK, NBR_COIN_CUDA>>>(pourcent, baseCoinId, cursor, env.minutes, scores);
    cudaDeviceSynchronize();
    cudaError_t error = cudaGetLastError();
    if (error != cudaSuccess) {
      printf("CUDA error: %s\n", cudaGetErrorString(error));
      exit(-1);
    }
    for (int i = 0; i < NBR_BLOCK * NBR_COIN_CUDA; i++) {
      if (scores[i] != -1) {
        int minuteId = i / NBR_COIN;
        int coinId = i % NBR_COIN;
        if (abs((minuteId + cursor) - baseCursor) < (SIT_SIZE * 5)) {
          continue;
        }
        for (int highIndex = 0; highIndex < NBR_HIGH_SCORE; highIndex++) {
          if (scores[i] < env.highScores[highIndex].score) {
            env.highScores[highIndex].score = scores[i];
            env.highScores[highIndex].minuteId = minuteId + cursor;
            env.highScores[highIndex].coinId = coinId;
            i += NBR_COIN_CUDA * 50;
            break;
          }
        }
      }
    }
    cursor += NBR_BLOCK;
  }
}

/**
 * Return the guessed percentage of change from situation to TIME_GUESS
 */
double makeNextGuess() {
  double pred = 0;
  for (int highIndex = 0; highIndex < NBR_HIGH_SCORE; highIndex++) {
    double start = env.minutes[env.highScores[highIndex].minuteId + SIT_SIZE]
                       ->data[env.highScores[highIndex].coinId]
                       .open;
    double end =
        env.minutes[env.highScores[highIndex].minuteId + SIT_SIZE + TIME_GUESS]
            ->data[env.highScores[highIndex].coinId]
            .open;
    pred += 100 - (start / end * 100);
  }
  pred = pred / NBR_HIGH_SCORE;
  return pred;
}

/**
 * Get real next pourcent of given situation
 */
double getRealNext(int minuteId, int coinId) {
  double start = env.minutes[minuteId + SIT_SIZE]->data[coinId].open;
  double end = env.minutes[minuteId + SIT_SIZE + TIME_GUESS]->data[coinId].open;
  return 100 - (start / end * 100);
}

void initMem() {
  cudaMallocManaged(&env.srcPourcent, sizeof(void *) * SIT_SIZE);
  for (int i = 0; i < SIT_SIZE; i++) {
    cudaMallocManaged(&env.srcPourcent[i], sizeof(Minute));
  }
  cudaMallocManaged(&env.scores, sizeof(int) * NBR_BLOCK * NBR_COIN);
  env.guessed = (double *)malloc(sizeof(double) * SIT_SIZE);
}

Situation getRandomSituation() {
  Situation res;
  int last = 0;
  while (1) {
    res.cursor = random_number(200000, NBR_MINUTES - 1000);
    last = res.cursor;
    res.coinId = random_number(0, NBR_COIN_CUDA);
    if (env.minutes[res.cursor]->data[res.coinId].open != -1 &&
        env.minutes[res.cursor]->data[res.coinId].open > MIN_PRICE) {
      return res;
    }
    usleep(1000);
  }
}

void printInfos(Situation sit) {
  FILE *fp;
  fp = fopen("tmp", "w");
  fprintf(fp, "%d;%d(", sit.coinId, sit.cursor);
  for (int i = 20; i < 220; i += 20) {
    double start = env.minutes[sit.cursor + SIT_SIZE]->data[sit.coinId].open;
    double end = env.minutes[sit.cursor + SIT_SIZE + i]->data[sit.coinId].open;
    double pred = 100 - (start / end * 100);
    fprintf(fp, "%lf;", pred);
  }
  fprintf(fp, ")-->");
  for (int highIndex = 0; highIndex < NBR_HIGH_SCORE; highIndex++) {
    fprintf(fp, "%d;%d(", env.highScores[highIndex].coinId,
            env.highScores[highIndex].minuteId);
    for (int i = 20; i < 220; i += 20) {
      double start = env.minutes[env.highScores[highIndex].minuteId + SIT_SIZE]
                         ->data[env.highScores[highIndex].coinId]
                         .open;
      double end =
          env.minutes[env.highScores[highIndex].minuteId + SIT_SIZE + i]
              ->data[env.highScores[highIndex].coinId]
              .open;
      double pred = 100 - (start / end * 100);
      fprintf(fp, "%lf;", pred);
    }
    fprintf(fp, ")|");
  }
  fprintf(fp, "\n");
  fclose(fp);
}

int main() {
  srand(time(NULL));
  env.minutes = loadHistory(0, AMOUNT_TEST);
  initMem();
  Situation sit = {0, 0}; // mirrors the bakeSituation(0, 0) call below
  Data *tmp = (Data *)malloc(sizeof(Data) * 500);
  double last = -1;
  while (1) {
    int fd = open("./actual", O_RDONLY);
    if (fd < 1) {
      printf("CUDA SLEEP 1\n");
      sleep(1);
      continue;
    }
    int res = read(fd, tmp, sizeof(Data) * 500);
    close(fd); // the file is re-opened on every pass
    if (last == tmp[0].open) {
      printf("CUDA SLEEP 2\n");
      sleep(1);
      continue;
    }
    printf("PROCESSING !\n");
    for (int i = 0; i < 500; i++) {
      env.minutes[i]->data[0].open = tmp[i].open;
    }
    bakeSituation(0, 0);
    printInfos(sit);
    last = tmp[0].open;
    sleep(1);
    // break;
    // Situation sit = getRandomSituation();
    // bakeSituation(sit.cursor, sit.coinId);
  }
  return 0;
}
68f4df390b137f3777b7a826bf221d4ede821a05.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
 *
 * Please refer to the NVIDIA end user license agreement (EULA) associated
 * with this source code for terms and conditions that govern your use of
 * this software. Any use, reproduction, disclosure, or distribution of
 * this software and related documentation outside the terms of the EULA
 * is strictly prohibited.
 *
 */

#ifndef _BICUBICTEXTURE_CU_
#define _BICUBICTEXTURE_CU_

#include <stdlib.h>
#include <stdio.h>
#include <string.h>

// includes, cuda
#include <hip/hip_runtime_api.h>
#include <cutil_math.h>

typedef unsigned int uint;
typedef unsigned char uchar;

#include <bicubicTexture_kernel.cuh>

////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions

// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err)  __checkCudaErrors (err, __FILE__, __LINE__)

inline void __checkCudaErrors( hipError_t err, const char *file, const int line )
{
  if( hipSuccess != err) {
    fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
            file, line, (int)err, hipGetErrorString( err ) );
    exit(-1);
  }
}

// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg)  __getLastCudaError (msg, __FILE__, __LINE__)

inline void __getLastCudaError( const char *errorMessage, const char *file, const int line )
{
  hipError_t err = hipGetLastError();
  if( hipSuccess != err) {
    fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
            file, line, errorMessage, (int)err, hipGetErrorString( err ) );
    exit(-1);
  }
}
// end of CUDA Helper Functions

hipArray *d_imageArray = 0;

extern "C"
void initTexture(int imageWidth, int imageHeight, uchar *h_data)
{
  // allocate array and copy image data
  hipChannelFormatDesc channelDesc = hipCreateChannelDesc(8, 0, 0, 0, hipChannelFormatKindUnsigned);
  checkCudaErrors( hipMallocArray(&d_imageArray, &channelDesc, imageWidth, imageHeight) );
  uint size = imageWidth * imageHeight * sizeof(uchar);
  checkCudaErrors( hipMemcpyToArray(d_imageArray, 0, 0, h_data, size, hipMemcpyHostToDevice) );
  free(h_data);

  // set texture parameters
  tex.addressMode[0] = hipAddressModeClamp;
  tex.addressMode[1] = hipAddressModeClamp;
  tex.filterMode = hipFilterModeLinear;
  tex.normalized = false;  // access with integer texture coordinates
  getLastCudaError("initTexture");

  // Bind the array to the texture
  checkCudaErrors( hipBindTextureToArray(tex, d_imageArray) );

  // bind same array to 2nd texture reference with point sampling
  tex2.addressMode[0] = hipAddressModeClamp;
  tex2.addressMode[1] = hipAddressModeClamp;
  tex2.filterMode = hipFilterModePoint;
  tex2.normalized = false;  // access with integer texture coordinates

  checkCudaErrors( hipBindTextureToArray(tex2, d_imageArray) );
}

extern "C"
void freeTexture()
{
  checkCudaErrors(hipFreeArray(d_imageArray));
}

// render image using CUDA
extern "C"
void render(int width, int height, float tx, float ty, float scale, float cx, float cy,
            dim3 blockSize, dim3 gridSize, int mode, uchar4 *output)
{
  // call CUDA kernel, writing results to PBO memory
  switch(mode) {
  case MODE_NEAREST:
    tex.filterMode = hipFilterModePoint;
    hipLaunchKernelGGL(( d_render), dim3(gridSize), dim3(blockSize), 0, 0, output, width, height, tx, ty, scale, cx, cy);
    break;
  case MODE_BILINEAR:
    tex.filterMode = hipFilterModeLinear;
    hipLaunchKernelGGL(( d_render), dim3(gridSize), dim3(blockSize), 0, 0, output, width, height, tx, ty, scale, cx, cy);
    break;
  case MODE_BICUBIC:
    tex.filterMode = hipFilterModePoint;
    hipLaunchKernelGGL(( d_renderBicubic), dim3(gridSize), dim3(blockSize), 0, 0, output, width, height, tx, ty, scale, cx, cy);
    break;
  case MODE_FAST_BICUBIC:
    tex.filterMode = hipFilterModeLinear;
    hipLaunchKernelGGL(( d_renderFastBicubic), dim3(gridSize), dim3(blockSize), 0, 0, output, width, height, tx, ty, scale, cx, cy);
    break;
  case MODE_CATROM:
    tex.filterMode = hipFilterModePoint;
    hipLaunchKernelGGL(( d_renderCatRom), dim3(gridSize), dim3(blockSize), 0, 0, output, width, height, tx, ty, scale, cx, cy);
    break;
  }
  getLastCudaError("kernel failed");
}

#endif
68f4df390b137f3777b7a826bf221d4ede821a05.cu
/*
 * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
 *
 * Please refer to the NVIDIA end user license agreement (EULA) associated
 * with this source code for terms and conditions that govern your use of
 * this software. Any use, reproduction, disclosure, or distribution of
 * this software and related documentation outside the terms of the EULA
 * is strictly prohibited.
 *
 */

#ifndef _BICUBICTEXTURE_CU_
#define _BICUBICTEXTURE_CU_

#include <stdlib.h>
#include <stdio.h>
#include <string.h>

// includes, cuda
#include <cuda_runtime_api.h>
#include <cutil_math.h>

typedef unsigned int uint;
typedef unsigned char uchar;

#include <bicubicTexture_kernel.cuh>

////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions

// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err)  __checkCudaErrors (err, __FILE__, __LINE__)

inline void __checkCudaErrors( cudaError err, const char *file, const int line )
{
  if( cudaSuccess != err) {
    fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
            file, line, (int)err, cudaGetErrorString( err ) );
    exit(-1);
  }
}

// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg)  __getLastCudaError (msg, __FILE__, __LINE__)

inline void __getLastCudaError( const char *errorMessage, const char *file, const int line )
{
  cudaError_t err = cudaGetLastError();
  if( cudaSuccess != err) {
    fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
            file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
    exit(-1);
  }
}
// end of CUDA Helper Functions

cudaArray *d_imageArray = 0;

extern "C"
void initTexture(int imageWidth, int imageHeight, uchar *h_data)
{
  // allocate array and copy image data
  cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsigned);
  checkCudaErrors( cudaMallocArray(&d_imageArray, &channelDesc, imageWidth, imageHeight) );
  uint size = imageWidth * imageHeight * sizeof(uchar);
  checkCudaErrors( cudaMemcpyToArray(d_imageArray, 0, 0, h_data, size, cudaMemcpyHostToDevice) );
  free(h_data);

  // set texture parameters
  tex.addressMode[0] = cudaAddressModeClamp;
  tex.addressMode[1] = cudaAddressModeClamp;
  tex.filterMode = cudaFilterModeLinear;
  tex.normalized = false;  // access with integer texture coordinates
  getLastCudaError("initTexture");

  // Bind the array to the texture
  checkCudaErrors( cudaBindTextureToArray(tex, d_imageArray) );

  // bind same array to 2nd texture reference with point sampling
  tex2.addressMode[0] = cudaAddressModeClamp;
  tex2.addressMode[1] = cudaAddressModeClamp;
  tex2.filterMode = cudaFilterModePoint;
  tex2.normalized = false;  // access with integer texture coordinates

  checkCudaErrors( cudaBindTextureToArray(tex2, d_imageArray) );
}

extern "C"
void freeTexture()
{
  checkCudaErrors(cudaFreeArray(d_imageArray));
}

// render image using CUDA
extern "C"
void render(int width, int height, float tx, float ty, float scale, float cx, float cy,
            dim3 blockSize, dim3 gridSize, int mode, uchar4 *output)
{
  // call CUDA kernel, writing results to PBO memory
  switch(mode) {
  case MODE_NEAREST:
    tex.filterMode = cudaFilterModePoint;
    d_render<<<gridSize, blockSize>>>(output, width, height, tx, ty, scale, cx, cy);
    break;
  case MODE_BILINEAR:
    tex.filterMode = cudaFilterModeLinear;
    d_render<<<gridSize, blockSize>>>(output, width, height, tx, ty, scale, cx, cy);
    break;
  case MODE_BICUBIC:
    tex.filterMode = cudaFilterModePoint;
    d_renderBicubic<<<gridSize, blockSize>>>(output, width, height, tx, ty, scale, cx, cy);
    break;
  case MODE_FAST_BICUBIC:
    tex.filterMode = cudaFilterModeLinear;
    d_renderFastBicubic<<<gridSize, blockSize>>>(output, width, height, tx, ty, scale, cx, cy);
    break;
  case MODE_CATROM:
    tex.filterMode = cudaFilterModePoint;
    d_renderCatRom<<<gridSize, blockSize>>>(output, width, height, tx, ty, scale, cx, cy);
    break;
  }
  getLastCudaError("kernel failed");
}

#endif
d8078df5d769fe79904fdfef4b93dcb8380b3035.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define NUM_NODES 1024

// Declaration of a structure
typedef struct {
  int startIndex;        // starting index in Adj list
  int numberOfNeighbors; // number of neighbors of each vertices
} Node;

__global__ void bfs_optimized(Node *gpu_vertex, int *gpu_neighbors,
                              bool *gpu_frontier, bool *gpu_visited,
                              int *gpu_cost, bool *gpu_done) {
  // ThreadID
  int threadId = threadIdx.x + blockIdx.x * blockDim.x;
  // boundary condition for threadID: out-of-range threads have no work
  if (threadId >= NUM_NODES)
    return;
  // checking condition for frontier and visited node array
  if (gpu_frontier[threadId] == true && gpu_visited[threadId] == false) {
    // Init
    gpu_frontier[threadId] = false;
    gpu_visited[threadId] = true;
    // assign values from array
    int startPoint = gpu_vertex[threadId].startIndex;
    int endPoint = startPoint + gpu_vertex[threadId].numberOfNeighbors;
    // traverse to the neighbors for every vertex
    for (int i = startPoint; i < endPoint; i++) {
      int neighbor = gpu_neighbors[i];
      // check visited mark and increase cost
      if (gpu_visited[neighbor] == false) {
        gpu_cost[neighbor] = gpu_cost[threadId] + 1;
        gpu_frontier[neighbor] = true;
        *gpu_done = false;
      }
    }
  }
}

// Main method
int main(int argc, char* argv[]) {
  // Kernel launch parameters
  int numberOfThreads = 1024;
  int numberOfBlocks = NUM_NODES / numberOfThreads;

  // Intialization of struct and neighbors array
  Node vertex[NUM_NODES];
  int edges[NUM_NODES];

  // populate the graph
  for (int i = 0; i < NUM_NODES; i++) {
    vertex[i].numberOfNeighbors = 1; //(rand() % 5)+1;
  }
  vertex[0].startIndex = 0;
  for (int j = 1; j < NUM_NODES; j++) {
    vertex[j].startIndex = vertex[j - 1].startIndex + vertex[j - 1].numberOfNeighbors;
  }
  for (int k = 0; k < NUM_NODES; k++) {
    edges[k] = k + 1;
  }

  hipSetDevice(0);

  // Time variable
  hipEvent_t start, stop;
  float time;
  hipEventCreate(&start);
  hipEventCreate(&stop);

  // Intitalization of array for frontier and visited nodes and costpath
  bool frontierArray[NUM_NODES] = { false };
  bool visitedNodes[NUM_NODES] = { false };
  int costOfPath[NUM_NODES] = { 0 };

  int source = 0;
  frontierArray[source] = true;

  // GPU variable declaration
  Node* gpu_vertex;
  int* gpu_neighbors;
  bool* gpu_frontier;
  bool* gpu_visited;
  int* gpu_cost;
  bool* gpu_done;

  // GPU memory allocation (gpu_neighbors is an int array, sized accordingly)
  hipMalloc((void**)&gpu_vertex, sizeof(Node) * NUM_NODES);
  hipMalloc((void**)&gpu_neighbors, sizeof(int) * NUM_NODES);
  hipMalloc((void**)&gpu_frontier, sizeof(bool) * NUM_NODES);
  hipMalloc((void**)&gpu_visited, sizeof(bool) * NUM_NODES);
  hipMalloc((void**)&gpu_cost, sizeof(int) * NUM_NODES);
  hipMalloc((void**)&gpu_done, sizeof(bool));

  // Transfer of data from CPU to GPU
  hipMemcpy(gpu_vertex, vertex, sizeof(Node) * NUM_NODES, hipMemcpyHostToDevice);
  hipMemcpy(gpu_neighbors, edges, sizeof(int) * NUM_NODES, hipMemcpyHostToDevice);
  hipMemcpy(gpu_frontier, frontierArray, sizeof(bool) * NUM_NODES, hipMemcpyHostToDevice);
  hipMemcpy(gpu_visited, visitedNodes, sizeof(bool) * NUM_NODES, hipMemcpyHostToDevice);
  hipMemcpy(gpu_cost, costOfPath, sizeof(int) * NUM_NODES, hipMemcpyHostToDevice);

  bool cpu_done;
  hipEventRecord(start, 0);
  int Kernel_call_count = 0;
  do {
    Kernel_call_count++;
    cpu_done = true;
    hipMemcpy(gpu_done, &cpu_done, sizeof(bool), hipMemcpyHostToDevice);
    // Kernel call
    hipLaunchKernelGGL(( bfs_optimized), dim3(numberOfBlocks), dim3(numberOfThreads), 0, 0, gpu_vertex, gpu_neighbors, gpu_frontier, gpu_visited, gpu_cost, gpu_done);
    hipMemcpy(&cpu_done, gpu_done, sizeof(bool), hipMemcpyDeviceToHost);
  } while (!cpu_done);

  // Copy final results from GPU to CPU
  hipMemcpy(costOfPath, gpu_cost, sizeof(int) * NUM_NODES, hipMemcpyDeviceToHost);
  printf("Kernel call count: %d\n", Kernel_call_count);

  hipEventRecord(stop, 0);
  hipEventSynchronize(stop); // the stop event must complete before the timer is read
  hipEventElapsedTime(&time, start, stop);
  printf("Parallel Job execution time: %.2f ms\n", time);

  hipFree(gpu_vertex);
  hipFree(gpu_neighbors);
  hipFree(gpu_frontier);
  hipFree(gpu_visited);
  hipFree(gpu_cost);
  hipFree(gpu_done);

  return 0;
}
d8078df5d769fe79904fdfef4b93dcb8380b3035.cu
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define NUM_NODES 1024

// Declaration of a structure
typedef struct {
  int startIndex;        // starting index in Adj list
  int numberOfNeighbors; // number of neighbors of each vertices
} Node;

__global__ void bfs_optimized(Node *gpu_vertex, int *gpu_neighbors,
                              bool *gpu_frontier, bool *gpu_visited,
                              int *gpu_cost, bool *gpu_done) {
  // ThreadID
  int threadId = threadIdx.x + blockIdx.x * blockDim.x;
  // boundary condition for threadID: out-of-range threads have no work
  if (threadId >= NUM_NODES)
    return;
  // checking condition for frontier and visited node array
  if (gpu_frontier[threadId] == true && gpu_visited[threadId] == false) {
    // Init
    gpu_frontier[threadId] = false;
    gpu_visited[threadId] = true;
    // assign values from array
    int startPoint = gpu_vertex[threadId].startIndex;
    int endPoint = startPoint + gpu_vertex[threadId].numberOfNeighbors;
    // traverse to the neighbors for every vertex
    for (int i = startPoint; i < endPoint; i++) {
      int neighbor = gpu_neighbors[i];
      // check visited mark and increase cost
      if (gpu_visited[neighbor] == false) {
        gpu_cost[neighbor] = gpu_cost[threadId] + 1;
        gpu_frontier[neighbor] = true;
        *gpu_done = false;
      }
    }
  }
}

// Main method
int main(int argc, char* argv[]) {
  // Kernel launch parameters
  int numberOfThreads = 1024;
  int numberOfBlocks = NUM_NODES / numberOfThreads;

  // Intialization of struct and neighbors array
  Node vertex[NUM_NODES];
  int edges[NUM_NODES];

  // populate the graph
  for (int i = 0; i < NUM_NODES; i++) {
    vertex[i].numberOfNeighbors = 1; //(rand() % 5)+1;
  }
  vertex[0].startIndex = 0;
  for (int j = 1; j < NUM_NODES; j++) {
    vertex[j].startIndex = vertex[j - 1].startIndex + vertex[j - 1].numberOfNeighbors;
  }
  for (int k = 0; k < NUM_NODES; k++) {
    edges[k] = k + 1;
  }

  cudaSetDevice(0);

  // Time variable
  cudaEvent_t start, stop;
  float time;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  // Intitalization of array for frontier and visited nodes and costpath
  bool frontierArray[NUM_NODES] = { false };
  bool visitedNodes[NUM_NODES] = { false };
  int costOfPath[NUM_NODES] = { 0 };

  int source = 0;
  frontierArray[source] = true;

  // GPU variable declaration
  Node* gpu_vertex;
  int* gpu_neighbors;
  bool* gpu_frontier;
  bool* gpu_visited;
  int* gpu_cost;
  bool* gpu_done;

  // GPU memory allocation (gpu_neighbors is an int array, sized accordingly)
  cudaMalloc((void**)&gpu_vertex, sizeof(Node) * NUM_NODES);
  cudaMalloc((void**)&gpu_neighbors, sizeof(int) * NUM_NODES);
  cudaMalloc((void**)&gpu_frontier, sizeof(bool) * NUM_NODES);
  cudaMalloc((void**)&gpu_visited, sizeof(bool) * NUM_NODES);
  cudaMalloc((void**)&gpu_cost, sizeof(int) * NUM_NODES);
  cudaMalloc((void**)&gpu_done, sizeof(bool));

  // Transfer of data from CPU to GPU
  cudaMemcpy(gpu_vertex, vertex, sizeof(Node) * NUM_NODES, cudaMemcpyHostToDevice);
  cudaMemcpy(gpu_neighbors, edges, sizeof(int) * NUM_NODES, cudaMemcpyHostToDevice);
  cudaMemcpy(gpu_frontier, frontierArray, sizeof(bool) * NUM_NODES, cudaMemcpyHostToDevice);
  cudaMemcpy(gpu_visited, visitedNodes, sizeof(bool) * NUM_NODES, cudaMemcpyHostToDevice);
  cudaMemcpy(gpu_cost, costOfPath, sizeof(int) * NUM_NODES, cudaMemcpyHostToDevice);

  bool cpu_done;
  cudaEventRecord(start, 0);
  int Kernel_call_count = 0;
  do {
    Kernel_call_count++;
    cpu_done = true;
    cudaMemcpy(gpu_done, &cpu_done, sizeof(bool), cudaMemcpyHostToDevice);
    // Kernel call
    bfs_optimized<<<numberOfBlocks, numberOfThreads>>>(gpu_vertex, gpu_neighbors, gpu_frontier, gpu_visited, gpu_cost, gpu_done);
    cudaMemcpy(&cpu_done, gpu_done, sizeof(bool), cudaMemcpyDeviceToHost);
  } while (!cpu_done);

  // Copy final results from GPU to CPU
  cudaMemcpy(costOfPath, gpu_cost, sizeof(int) * NUM_NODES, cudaMemcpyDeviceToHost);
  printf("Kernel call count: %d\n", Kernel_call_count);

  cudaEventRecord(stop, 0);
  cudaEventSynchronize(stop); // the stop event must complete before the timer is read
  cudaEventElapsedTime(&time, start, stop);
  printf("Parallel Job execution time: %.2f ms\n", time);

  cudaFree(gpu_vertex);
  cudaFree(gpu_neighbors);
  cudaFree(gpu_frontier);
  cudaFree(gpu_visited);
  cudaFree(gpu_cost);
  cudaFree(gpu_done);

  return 0;
}
64ca6e5b724b8c568246820792c28f2efe915521.hip
// !!! This is a file automatically generated by hipify!!!
#include "./c_runtime_api.h"
#include <cassert>
#include <cstdio>
#include <rocblas.h>
#include <hip/hip_runtime.h>
#include <math.h>

/* TODO: Your code here */
/* all your GPU kernel code, e.g. matrix_softmax_cross_entropy_kernel */

// y = inputs[0], y_ = inputs[1]
// np.mean(-np.sum(y_ * np.log(softmax(y)), axis=1), keepdims=True)
__global__ void matrix_softmax_cross_entropy_kernel(int nrow, int ncol,
                                                    const float *input_a,
                                                    const float *input_b,
                                                    float *output) {
  // Dynamic shared memory, size provided at kernel launch.
  extern __shared__ float loss_per_row[];
  // Two dimensional thread blocks.
  int y = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x +
          threadIdx.x;
  if (y >= nrow) {
    return;
  }
  input_a += y * ncol;
  input_b += y * ncol;
  float maxval = *input_a;
  // Find max for a row.
  for (int x = 1; x < ncol; ++x) {
    maxval = max(maxval, input_a[x]);
  }
  // Deduct by max for a row, and raise to exp.
  float sum = 0;
  for (int x = 0; x < ncol; ++x) {
    sum += exp(input_a[x] - maxval);
  }
  // Compute per-row loss.
  float loss = 0;
  for (int x = 0; x < ncol; ++x) {
    loss -= input_b[x] * log(exp(input_a[x] - maxval) / sum);
  }
  loss_per_row[y] = loss;
  __syncthreads();
  // Compute reduce_mean across rows.
  float mean_loss = 0;
  // Use a single thread to reduce mean across rows.
  if ((threadIdx.x == 0) && (threadIdx.y == 0)) {
    for (int i = 0; i < nrow; ++i) {
      mean_loss += loss_per_row[i];
    }
    mean_loss /= nrow;
    output[0] = mean_loss;
  }
}

__global__ void array_set_kernel(float *array, float value, int n) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < n) {
    array[index] = value;
  }
}

int DLGpuArraySet(DLArrayHandle arr, float value) {
  /* TODO: Your code here */
  int n = 1;
  for (int i = 0; i < arr->ndim; i++) {
    n = n * arr->shape[i];
  }
  float *array_data = (float *) arr->data;
  int threads_per_block = 1024;
  int num_blocks = (n + threads_per_block - 1) / threads_per_block;
  array_set_kernel << < num_blocks, threads_per_block >> > (array_data, value, n);
  return 0;
}

__global__ void broadcast_to_kernel(const float *input_data, float *output_data,
                                    index_t input_n, index_t output_n) {
  index_t idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx < output_n) {
    output_data[idx] = input_data[idx % input_n];
  }
}

int DLGpuBroadcastTo(const DLArrayHandle input, DLArrayHandle output) {
  /* TODO: Your code here */
  index_t input_n = 1;
  for (int i = 0; i < input->ndim; i++)
    input_n *= input->shape[i];
  index_t output_n = 1;
  for (int i = 0; i < output->ndim; i++)
    output_n *= output->shape[i];
  const float *input_data = (const float *) input->data;
  float *output_data = (float *) output->data;
  int thread_per_block = 512;
  int n_blocks = (output_n + thread_per_block - 1) / thread_per_block;
  broadcast_to_kernel << < n_blocks, thread_per_block >> > (input_data, output_data,
      input_n, output_n);
  return 0;
}

__global__ void reduced_sum_axis_zero(const float *input_data, float *output_data,
                                      int input_n, int output_n) {
  int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx < output_n) {
    output_data[idx] = 0.0;
    for (int i = 0; i < input_n / output_n; i++) {
      output_data[idx] += input_data[i * output_n + idx];
    }
  }
}

int DLGpuReduceSumAxisZero(const DLArrayHandle input, DLArrayHandle output) {
  /* TODO: Your code here */
  int input_n = 1;
  for (int i = 0; i < input->ndim; i++) {
    input_n *= input->shape[i];
  }
  int output_n = 1;
  for (int i = 0; i < output->ndim; i++) {
    output_n *= output->shape[i];
  }
  const float *input_data = (const float *) input->data;
  float *output_data = (float *) output->data;
  int thread_per_block = 1024;
  int n_blocks = (output_n + thread_per_block - 1) / thread_per_block;
  reduced_sum_axis_zero << < n_blocks, thread_per_block >> > (input_data, output_data,
      input_n, output_n);
  return 0;
}

__global__ void matrix_elementwise_add(const float *a, const float *b, float *c,
                                       int n) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < n) {
    c[index] = a[index] + b[index];
  }
}

int DLGpuMatrixElementwiseAdd(const DLArrayHandle matA, const DLArrayHandle matB,
                              DLArrayHandle output) {
  /* TODO: Your code here */
  int n = 1;
  for (int i = 0; i < output->ndim; i++) {
    n = n * output->shape[i];
  }
  const float *data_A = (const float *) matA->data;
  const float *data_B = (const float *) matB->data;
  float *data_output = (float *) output->data;
  int threads_per_block = 1024;
  int num_blocks = (n + threads_per_block - 1) / threads_per_block;
  matrix_elementwise_add << < num_blocks, threads_per_block >> > (data_A, data_B,
      data_output, n);
  return 0;
}

__global__ void matrix_elementwise_subtract(const float *a, const float *b, float *c,
                                            int n) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < n) {
    c[index] = a[index] - b[index];
  }
}

int DLGpuMatrixElementwiseSubtract(const DLArrayHandle matA, const DLArrayHandle matB,
                                   DLArrayHandle output) {
  /* TODO: Your code here */
  int n = 1;
  for (int i = 0; i < output->ndim; i++) {
    n = n * output->shape[i];
  }
  const float *data_A = (const float *) matA->data;
  const float *data_B = (const float *) matB->data;
  float *data_output = (float *) output->data;
  int threads_per_block = 1024;
  int num_blocks = (n + threads_per_block - 1) / threads_per_block;
  matrix_elementwise_subtract << < num_blocks, threads_per_block >> > (data_A, data_B,
      data_output, n);
  return 0;
}

__global__ void matrix_elementwise_division(const float *a, const float *b,
                                            float *result, int n) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < n) {
    result[index] = a[index] / b[index];
  }
}

int DLGpuMatrixElementwiseDiv(const DLArrayHandle matA, const DLArrayHandle matB,
                              DLArrayHandle output) {
  int n = 1;
  for (int i = 0; i < output->ndim; i++) {
    n = n * output->shape[i];
  }
  const float *data_A = (const float *) matA->data;
  const float *data_B = (const float *) matB->data;
  float *data_output = (float *) output->data;
  int threads_per_block = 1024;
  int num_blocks = (n + threads_per_block - 1) / threads_per_block;
  matrix_elementwise_division << < num_blocks, threads_per_block >> > (data_A, data_B,
      data_output, n);
  return 0;
}

__global__ void matrix_elementwise_add_by_const_kernal(const float *d_in, float *d_out,
                                                       float val, int n) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < n) {
    d_out[index] = d_in[index] + val;
  }
}

int DLGpuMatrixElementwiseAddByConst(const DLArrayHandle input, float val,
                                     DLArrayHandle output) {
  /* TODO: Your code here */
  int n = 1;
  for (int i = 0; i < output->ndim; i++) {
    n = n * output->shape[i];
  }
  const float *input_data = (const float *) input->data;
  float *output_data = (float *) output->data;
  int threads_per_block = 1024;
  int num_blocks = (n + threads_per_block - 1) / threads_per_block;
  matrix_elementwise_add_by_const_kernal << < num_blocks, threads_per_block >> > (
      input_data, output_data, val, n);
  return 0;
}

__global__ void matrix_elementwise_subtract_by_const_kernal(const float *d_in,
                                                            float *d_out, float val,
                                                            int n) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < n) {
    d_out[index] = d_in[index] - val;
  }
}

int DLGpuMatrixElementwiseSubtractByConst(const DLArrayHandle input, float val,
                                          DLArrayHandle output) {
  /* TODO: Your code here */
  int n = 1;
  for (int i = 0; i < output->ndim; i++) {
    n = n * output->shape[i];
  }
  const float *input_data = (const float *) input->data;
  float *output_data = (float *) output->data;
  int threads_per_block = 1024;
  int num_blocks = (n + threads_per_block - 1) / threads_per_block;
  matrix_elementwise_subtract_by_const_kernal << < num_blocks, threads_per_block >> > (
      input_data, output_data, val, n);
  return 0;
}

__global__ void matrix_elementwise_div_by_const_kernal(const float *d_in, float *d_out,
                                                       float val, int n) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < n) {
    d_out[index] = d_in[index] / val;
  }
}

int DLGpuMatrixElementwiseDivByConst(const DLArrayHandle input, float val,
                                     DLArrayHandle output) {
  /* TODO: Your code here */
  int n = 1;
  for (int i = 0; i < output->ndim; i++) {
    n = n * output->shape[i];
  }
  const float *input_data = (const float *) input->data;
  float *output_data = (float *) output->data;
  int threads_per_block = 1024;
  int num_blocks = (n + threads_per_block - 1) / threads_per_block;
  matrix_elementwise_div_by_const_kernal << < num_blocks, threads_per_block >> > (
      input_data, output_data, val, n);
  return 0;
}

__global__ void elementwise_mul_kernel(const float *data_a, const float *data_b,
                                       float *output, int n) {
  int index = blockDim.x * blockIdx.x + threadIdx.x;
  if (index < n) {
    output[index] = data_a[index] * data_b[index];
  }
}

int DLGpuMatrixElementwiseMultiply(const DLArrayHandle matA, const DLArrayHandle matB,
                                   DLArrayHandle output) {
  /* TODO: Your code here */
  int n = 1;
  for (int i = 0; i < output->ndim; i++) {
    n = n * output->shape[i];
  }
  int threads_per_block = 1024;
  int num_blocks = (n + threads_per_block - 1) / threads_per_block;
  const float *mat_a_data = (const float *) matA->data;
  const float *mat_b_data = (const float *) matB->data;
  float *output_data = (float *) output->data;
  elementwise_mul_kernel << < num_blocks, threads_per_block >> > (mat_a_data,
      mat_b_data, output_data, n);
  return 0;
}

__global__ void matrix_elementwise_sqrt(const float *d_input, float *d_output, int n) {
  int index = blockDim.x * blockIdx.x + threadIdx.x;
  if (index < n) {
    d_output[index] = sqrt(d_input[index]);
  }
}

int DLGpuMatrixElementwiseSqrt(const DLArrayHandle input, DLArrayHandle output) {
  /* TODO: Your code here */
  int n = 1;
  for (int i = 0; i < input->ndim; i++) {
    n *= input->shape[i];
  }
  const float *input_data = (const float *) input->data;
  float *output_data = (float *) output->data;
  int threads_per_block = 1024;
  int num_blocks = (n + threads_per_block - 1) / threads_per_block;
  matrix_elementwise_sqrt << < num_blocks, threads_per_block >> > (input_data,
      output_data, n);
  return 0;
}

__global__ void marix_multiply_by_const(const float *d_input, float *d_output,
                                        float val, int n) {
  int index = blockDim.x * blockIdx.x + threadIdx.x;
  if (index < n) {
    d_output[index] = d_input[index] * val;
  }
}

int DLGpuMatrixMultiplyByConst(const DLArrayHandle input, float val,
                               DLArrayHandle output) {
  /* TODO: Your code here */
  int n = 1;
  for (int i = 0; i < input->ndim; i++) {
    n *= input->shape[i];
  }
  const float *input_data = (const float *) input->data;
  float *output_data = (float *) output->data;
  int threads_per_block = 1024;
  int num_blocks = (n + threads_per_block - 1) / threads_per_block;
  marix_multiply_by_const << < num_blocks, threads_per_block >> > (input_data,
      output_data, val, n);
  return 0;
}

// int DLGpuMatrixMultiply(const DLArrayHandle matA, bool transposeA,
//                         const DLArrayHandle matB, bool transposeB, DLArrayHandle matC) {
//   /* TODO: Your code here */
//   // Hint: use cublas
//   // cublas assume matrix is column major
//   hipblasHandle_t handle;
//   hipblasStatus_t stat = hipblasCreate(&handle);
//   if (stat != HIPBLAS_STATUS_SUCCESS)
//     printf("CUBLAS initialization failed\n");
//   const float *matA_data = (const float *) matA->data;
//   const float *matB_data = (const float *) matB->data;
//   float *matC_data = (float *) matC->data;
//   hipblasOperation_t transa = transposeA ? HIPBLAS_OP_T : HIPBLAS_OP_N;
//   hipblasOperation_t transb = transposeB ? HIPBLAS_OP_T : HIPBLAS_OP_N;
//   int m = transposeB ? matB->shape[0] : matB->shape[1];
//   int n = transposeA ? matA->shape[1] : matA->shape[0];
//   int k = transposeA ? matA->shape[0] : matA->shape[1];
//   float alpha = 1.0f;
//   float beta = 0.0f;
//   stat = hipblasSgemm(handle, transb, transa,
//                       m, n, k,
//                       &alpha, matB_data, matB->shape[1],
//                       matA_data, matA->shape[1],
//                       &beta, matC_data, m);
//   if (stat != HIPBLAS_STATUS_SUCCESS)
//     printf("CUBLAS kernel execution error.\n");
//   stat = hipblasDestroy(handle);
//   if (stat != HIPBLAS_STATUS_SUCCESS)
//     printf("CUBLAS shutdown error\n");
//   return 0;
// }

hipblasHandle_t cublas_handle = NULL;

int DLGpuMatrixMultiply(const DLArrayHandle matA, bool transposeA,
                        const DLArrayHandle matB, bool transposeB,
                        DLArrayHandle matC) {
  /* TODO: Your code here */
  // Hint: use cublas
  // cublas assume matrix is column major
  // op(A) * op(B) = C
  // op(B)T * op(A)T = CT
  if (!cublas_handle) {
    hipblasCreate(&cublas_handle);
  }
  float one = 1.0f;
  float zero = 0.0f;
  int m = matC->shape[1];
  int n = matC->shape[0];
  int k = transposeA ? matA->shape[0] : matA->shape[1];
  hipblasSgemm(cublas_handle,
               transposeB ? HIPBLAS_OP_T : HIPBLAS_OP_N,
               transposeA ? HIPBLAS_OP_T : HIPBLAS_OP_N,
               m, n, k,
               &one,
               (const float *) matB->data, !transposeB ? m : k,
               (const float *) matA->data, !transposeA ? k : n,
               &zero,
               (float *) matC->data, m);
  return 0;
}

__global__ void relu_kernel(const float *input, float *output, int n) {
  int index = blockDim.x * blockIdx.x + threadIdx.x;
  if (index < n) {
    float element = input[index];
    if (element <= 0) {
      output[index] = 0;
    } else {
      output[index] = element;
    }
  }
}

int DLGpuRelu(const DLArrayHandle input, DLArrayHandle output) {
  /* TODO: Your code here */
  int n = 1;
  for (int i = 0; i < input->ndim; i++) {
    n *= input->shape[i];
  }
  const float *input_data = (const float *) input->data;
  float *output_data = (float *) output->data;
  int threads_per_block = 1024;
  int num_blocks = (n + threads_per_block - 1) / threads_per_block;
  relu_kernel << < num_blocks, threads_per_block >> > (input_data, output_data, n);
  return 0;
}

__global__ void relu_gradient_kernel(const float *input, float *output,
                                     const float *in_grad, int n) {
  int index = blockDim.x * blockIdx.x + threadIdx.x;
  if (index < n) {
    float element = input[index];
    if (element <= 0) {
      output[index] = 0;
    } else {
      output[index] = in_grad[index];
    }
  }
}

int DLGpuReluGradient(const DLArrayHandle input, const DLArrayHandle in_grad,
                      DLArrayHandle output) {
  /* TODO: Your code here */
  int n = 1;
  for (int i = 0; i < input->ndim; i++) {
    n *= input->shape[i];
  }
  const float *input_data = (const float *) input->data;
  float *output_data = (float *) output->data;
  const float *in_grad_data = (const float *) in_grad->data;
  int threads_per_block = 1024;
  int num_blocks = (n + threads_per_block - 1) / threads_per_block;
  relu_gradient_kernel << < num_blocks, threads_per_block >> > (input_data,
      output_data, in_grad_data, n);
  return 0;
}

__global__ void softmax_kernel(int64_t nrow, int64_t ncol, const float *input_data,
                               float *output_data) {
  // two dimensional thread blocks.
  int y = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x +
          threadIdx.x;
  if (y >= nrow) {
    return;
  }
  // y_th row of input data
  input_data += y * ncol;
  output_data += y * ncol;
  // find max for a row.
  float maxval = *input_data;
  for (int x = 1; x < ncol; ++x) {
    maxval = max(maxval, input_data[x]);
  }
  // Deduct by max for a row, and raise to exp.
  // in case of too large of exp, and the result will not be affected
  float sum = 0;
  for (int x = 0; x < ncol; ++x) {
    sum += exp(input_data[x] - maxval);
  }
  // Compute per-row softmax.
  for (int x = 0; x < ncol; ++x) {
    output_data[x] = exp(input_data[x] - maxval) / sum;
  }
}

int DLGpuSoftmax(const DLArrayHandle input, DLArrayHandle output) {
  /* TODO: Your code here */
  assert(input->ndim == 2);
  assert(output->ndim == 2);
  int64_t nrow = input->shape[0];
  int64_t ncol = input->shape[1];
  float *input_data = (float *) input->data;
  float *output_data = (float *) output->data;
  dim3 threads;
  if (nrow < 1024) {
    threads.x = nrow;
  } else {
    threads.x = 1024;
    threads.y = (nrow + 1023) / 1024;
  }
  softmax_kernel << < 1, threads >> > (nrow, ncol, input_data, output_data);
  return 0;
}

int DLGpuSoftmaxCrossEntropy(const DLArrayHandle input_a,
                             const DLArrayHandle input_b,
                             DLArrayHandle output) {
  assert(input_a->ndim == 2);
  assert(input_b->ndim == 2);
  assert(output->ndim == 1);
  assert(input_a->shape[0] == input_b->shape[0] &&
         input_a->shape[1] == input_b->shape[1]);
  int nrow = input_a->shape[0];
  // Maximum x- or y-dimension of a block = 1024
  // But we need 'nrow' shared memory, and max shared memory is 48KB.
  // Conservatively allow max 16KB shared memory.
  assert(nrow <= 1024 * 4);
  int ncol = input_a->shape[1];
  const float *input_data_a = (const float *) input_a->data;
  const float *input_data_b = (const float *) input_b->data;
  float *output_data = (float *) output->data;
  dim3 threads;
  if (nrow <= 1024) {
    threads.x = nrow;
  } else {
    threads.x = 1024;
    threads.y = (nrow + 1023) / 1024;
  }
  // 1 block, each block with 'threads' number of threads with 'nrow' shared
  // memory size
  matrix_softmax_cross_entropy_kernel << < 1, threads, nrow * sizeof(float) >> > (
      nrow, ncol, input_data_a, input_data_b, output_data);
  return 0;
}
64ca6e5b724b8c568246820792c28f2efe915521.cu
#include "./c_runtime_api.h" #include <cassert> #include <cstdio> #include <cublas_v2.h> #include <cuda_runtime.h> #include <math.h> /* TODO: Your code here */ /* all your GPU kernel code, e.g. matrix_softmax_cross_entropy_kernel */ // y = inputs[0], y_ = inputs[1] // np.mean(-np.sum(y_ * np.log(softmax(y)), axis=1), keepdims=True) __global__ void matrix_softmax_cross_entropy_kernel(int nrow, int ncol, const float *input_a, const float *input_b, float *output) { // Dynamic shared memory, size provided at kernel launch. extern __shared__ float loss_per_row[]; // Two dimensional thread blocks. int y = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; if (y >= nrow) { return; } input_a += y * ncol; input_b += y * ncol; float maxval = *input_a; // Find max for a row. for (int x = 1; x < ncol; ++x) { maxval = max(maxval, input_a[x]); } // Deduct by max for a row, and raise to exp. float sum = 0; for (int x = 0; x < ncol; ++x) { sum += exp(input_a[x] - maxval); } // Compute per-row loss. float loss = 0; for (int x = 0; x < ncol; ++x) { loss -= input_b[x] * log(exp(input_a[x] - maxval) / sum); } loss_per_row[y] = loss; __syncthreads(); // Compute reduce_mean across rows. float mean_loss = 0; // Use a single thread to reduce mean across rows. if ((threadIdx.x == 0) && (threadIdx.y == 0)) { for (int i = 0; i < nrow; ++i) { mean_loss += loss_per_row[i]; } mean_loss /= nrow; output[0] = mean_loss; } } __global__ void array_set_kernel(float *array, float value, int n) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < n) { array[index] = value; } } int DLGpuArraySet(DLArrayHandle arr, float value) { /* TODO: Your code here */ int n = 1; for (int i = 0; i < arr->ndim; i++) { n = n * arr->shape[i]; } float *array_data = (float *) arr->data; int threads_per_block = 1024; int num_blocks = (n + threads_per_block - 1) / threads_per_block; array_set_kernel << < num_blocks, threads_per_block >> > (array_data, value, n); return 0; } __global__ void broadcast_to_kernel(const float *input_data, float *output_data, index_t input_n, index_t output_n) { index_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < output_n) { output_data[idx] = input_data[idx % input_n]; } } int DLGpuBroadcastTo(const DLArrayHandle input, DLArrayHandle output) { /* TODO: Your code here */ index_t input_n = 1; for (int i = 0; i < input->ndim; i++) input_n *= input->shape[i]; index_t output_n = 1; for (int i = 0; i < output->ndim; i++) output_n *= output->shape[i]; const float *input_data = (const float *) input->data; float *output_data = (float *) output->data; int thread_per_block = 512; int n_blocks = (output_n + thread_per_block - 1) / thread_per_block; broadcast_to_kernel << < n_blocks, thread_per_block >> > (input_data, output_data, input_n, output_n); return 0; } __global__ void reduced_sum_axis_zero(const float *input_data, float *output_data, int input_n, int output_n) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < output_n) { output_data[idx] = 0.0; for (int i = 0; i < input_n / output_n; i++) { output_data[idx] += input_data[i * output_n + idx]; } } } int DLGpuReduceSumAxisZero(const DLArrayHandle input, DLArrayHandle output) { /* TODO: Your code here */ int input_n = 1; for (int i = 0; i < input->ndim; i++) { input_n *= input->shape[i]; } int output_n = 1; for (int i = 0; i < output->ndim; i++) { output_n *= output->shape[i]; } const float *input_data = (const float *) input->data; float *output_data = (float *) output->data; int thread_per_block = 1024; int 
n_blocks = (output_n + thread_per_block - 1) / thread_per_block; reduced_sum_axis_zero << < n_blocks, thread_per_block >> > (input_data, output_data, input_n, output_n); return 0; } __global__ void matrix_elementwise_add(const float *a, const float *b, float *c, int n) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < n) { c[index] = a[index] + b[index]; } } int DLGpuMatrixElementwiseAdd(const DLArrayHandle matA, const DLArrayHandle matB, DLArrayHandle output) { /* TODO: Your code here */ int n = 1; for (int i = 0; i < output->ndim; i++) { n = n * output->shape[i]; } const float *data_A = (const float *) matA->data; const float *data_B = (const float *) matB->data; float *data_output = (float *) output->data; int threads_per_block = 1024; int num_blocks = (n + threads_per_block - 1) / threads_per_block; matrix_elementwise_add << < num_blocks, threads_per_block >> > (data_A, data_B, data_output, n); return 0; } __global__ void matrix_elementwise_subtract(const float *a, const float *b, float *c, int n) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < n) { c[index] = a[index] - b[index]; } } int DLGpuMatrixElementwiseSubtract(const DLArrayHandle matA, const DLArrayHandle matB, DLArrayHandle output) { /* TODO: Your code here */ int n = 1; for (int i = 0; i < output->ndim; i++) { n = n * output->shape[i]; } const float *data_A = (const float *) matA->data; const float *data_B = (const float *) matB->data; float *data_output = (float *) output->data; int threads_per_block = 1024; int num_blocks = (n + threads_per_block - 1) / threads_per_block; matrix_elementwise_subtract << < num_blocks, threads_per_block >> > (data_A, data_B, data_output, n); return 0; } __global__ void matrix_elementwise_division(const float *a, const float *b, float *result, int n) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < n) { result[index] = a[index] / b[index]; } } int DLGpuMatrixElementwiseDiv(const DLArrayHandle matA, const DLArrayHandle matB, DLArrayHandle output) { int n = 1; for (int i = 0; i < output->ndim; i++) { n = n * output->shape[i]; } const float *data_A = (const float *) matA->data; const float *data_B = (const float *) matB->data; float *data_output = (float *) output->data; int threads_per_block = 1024; int num_blocks = (n + threads_per_block - 1) / threads_per_block; matrix_elementwise_division << < num_blocks, threads_per_block >> > (data_A, data_B, data_output, n); return 0; } __global__ void matrix_elementwise_add_by_const_kernal(const float *d_in, float *d_out, float val, int n) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < n) { d_out[index] = d_in[index] + val; } } int DLGpuMatrixElementwiseAddByConst(const DLArrayHandle input, float val, DLArrayHandle output) { /* TODO: Your code here */ int n = 1; for (int i = 0; i < output->ndim; i++) { n = n * output->shape[i]; } const float *input_data = (const float *) input->data; float *output_data = (float *) output->data; int threads_per_block = 1024; int num_blocks = (n + threads_per_block - 1) / threads_per_block; matrix_elementwise_add_by_const_kernal << < num_blocks, threads_per_block >> > ( input_data, output_data, val, n); return 0; } __global__ void matrix_elementwise_subtract_by_const_kernal(const float *d_in, float *d_out, float val, int n) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < n) { d_out[index] = d_in[index] - val; } } int DLGpuMatrixElementwiseSubtractByConst(const DLArrayHandle input, float val, DLArrayHandle output) { /* TODO: Your code here */ 
int n = 1; for (int i = 0; i < output->ndim; i++) { n = n * output->shape[i]; } const float *input_data = (const float *) input->data; float *output_data = (float *) output->data; int threads_per_block = 1024; int num_blocks = (n + threads_per_block - 1) / threads_per_block; matrix_elementwise_subtract_by_const_kernal << < num_blocks, threads_per_block >> > ( input_data, output_data, val, n); return 0; } __global__ void matrix_elementwise_div_by_const_kernal(const float *d_in, float *d_out, float val, int n) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < n) { d_out[index] = d_in[index] / val; } } int DLGpuMatrixElementwiseDivByConst(const DLArrayHandle input, float val, DLArrayHandle output) { /* TODO: Your code here */ int n = 1; for (int i = 0; i < output->ndim; i++) { n = n * output->shape[i]; } const float *input_data = (const float *) input->data; float *output_data = (float *) output->data; int threads_per_block = 1024; int num_blocks = (n + threads_per_block - 1) / threads_per_block; matrix_elementwise_div_by_const_kernal << < num_blocks, threads_per_block >> > ( input_data, output_data, val, n); return 0; } __global__ void elementwise_mul_kernel(const float *data_a, const float *data_b, float *output, int n) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index < n) { output[index] = data_a[index] * data_b[index]; } } int DLGpuMatrixElementwiseMultiply(const DLArrayHandle matA, const DLArrayHandle matB, DLArrayHandle output) { /* TODO: Your code here */ int n = 1; for (int i = 0; i < output->ndim; i++) { n = n * output->shape[i]; } int threads_per_block = 1024; int num_blocks = (n + threads_per_block - 1) / threads_per_block; const float *mat_a_data = (const float *) matA->data; const float *mat_b_data = (const float *) matB->data; float *output_data = (float *) output->data; elementwise_mul_kernel << < num_blocks, threads_per_block >> > (mat_a_data, mat_b_data, output_data, n); return 0; } __global__ void matrix_elementwise_sqrt(const float *d_input, float *d_output, int n) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index < n) { d_output[index] = sqrt(d_input[index]); } } int DLGpuMatrixElementwiseSqrt(const DLArrayHandle input, DLArrayHandle output) { /* TODO: Your code here */ int n = 1; for (int i = 0; i < input->ndim; i++) { n *= input->shape[i]; } const float *input_data = (const float *) input->data; float *output_data = (float *) output->data; int threads_per_block = 1024; int num_blocks = (n + threads_per_block - 1) / threads_per_block; matrix_elementwise_sqrt << < num_blocks, threads_per_block >> > (input_data, output_data, n); return 0; } __global__ void marix_multiply_by_const(const float *d_input, float *d_output, float val, int n) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index < n) { d_output[index] = d_input[index] * val; } } int DLGpuMatrixMultiplyByConst(const DLArrayHandle input, float val, DLArrayHandle output) { /* TODO: Your code here */ int n = 1; for (int i = 0; i < input->ndim; i++) { n *= input->shape[i]; } const float *input_data = (const float *) input->data; float *output_data = (float *) output->data; int threads_per_block = 1024; int num_blocks = (n + threads_per_block - 1) / threads_per_block; marix_multiply_by_const << < num_blocks, threads_per_block >> > (input_data, output_data, val, n); return 0; } // int DLGpuMatrixMultiply(const DLArrayHandle matA, bool transposeA, // const DLArrayHandle matB, bool transposeB, DLArrayHandle matC) { // /* TODO: Your code here */ // // Hint: use cublas // // 
cublas assume matrix is column major // cublasHandle_t handle; // cublasStatus_t stat = cublasCreate(&handle); // if (stat != CUBLAS_STATUS_SUCCESS) // printf("CUBLAS initialization failed\n"); // const float *matA_data = (const float *) matA->data; // const float *matB_data = (const float *) matB->data; // float *matC_data = (float *) matC->data; // cublasOperation_t transa = transposeA ? CUBLAS_OP_T : CUBLAS_OP_N; // cublasOperation_t transb = transposeB ? CUBLAS_OP_T : CUBLAS_OP_N; // int m = transposeB ? matB->shape[0] : matB->shape[1]; // int n = transposeA ? matA->shape[1] : matA->shape[0]; // int k = transposeA ? matA->shape[0] : matA->shape[1]; // float alpha = 1.0f; // float beta = 0.0f; // stat = cublasSgemm(handle, transb, transa, // m, n, k, // &alpha, matB_data, matB->shape[1], // matA_data, matA->shape[1], // &beta, matC_data, m); // if (stat != CUBLAS_STATUS_SUCCESS) // printf("CUBLAS kernel execution error.\n"); // stat = cublasDestroy(handle); // if (stat != CUBLAS_STATUS_SUCCESS) // printf("CUBLAS shutdown error\n"); // return 0; // } cublasHandle_t cublas_handle = NULL; int DLGpuMatrixMultiply(const DLArrayHandle matA, bool transposeA, const DLArrayHandle matB, bool transposeB, DLArrayHandle matC) { /* TODO: Your code here */ // Hint: use cublas // cublas assume matrix is column major // op(A) * op(B) = C // op(B)T * op(A)T = CT if (!cublas_handle) { cublasCreate(&cublas_handle); } float one = 1.0f; float zero = 0.0f; int m = matC->shape[1]; int n = matC->shape[0]; int k = transposeA ? matA->shape[0] : matA->shape[1]; cublasSgemm(cublas_handle, transposeB ? CUBLAS_OP_T : CUBLAS_OP_N, transposeA ? CUBLAS_OP_T : CUBLAS_OP_N, m, n, k, &one, (const float *) matB->data, !transposeB ? m : k, (const float *) matA->data, !transposeA ? k : n, &zero, (float *) matC->data, m ); return 0; } __global__ void relu_kernel(const float *input, float *output, int n) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index < n) { float element = input[index]; if (element <= 0) { output[index] = 0; } else { output[index] = element; } } } int DLGpuRelu(const DLArrayHandle input, DLArrayHandle output) { /* TODO: Your code here */ int n = 1; for (int i = 0; i < input->ndim; i++) { n *= input->shape[i]; } const float *input_data = (const float *) input->data; float *output_data = (float *) output->data; int threads_per_block = 1024; int num_blocks = (n + threads_per_block - 1) / threads_per_block; relu_kernel << < num_blocks, threads_per_block >> > (input_data, output_data, n); return 0; } __global__ void relu_gradient_kernel(const float *input, float *output, const float *in_grad, int n) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index < n) { float element = input[index]; if (element <= 0) { output[index] = 0; } else { output[index] = in_grad[index]; } } } int DLGpuReluGradient(const DLArrayHandle input, const DLArrayHandle in_grad, DLArrayHandle output) { /* TODO: Your code here */ int n = 1; for (int i = 0; i < input->ndim; i++) { n *= input->shape[i]; } const float *input_data = (const float *) input->data; float *output_data = (float *) output->data; const float *in_grad_data = (const float *) in_grad->data; int threads_per_block = 1024; int num_blocks = (n + threads_per_block - 1) / threads_per_block; relu_gradient_kernel << < num_blocks, threads_per_block >> > (input_data, output_data, in_grad_data, n); return 0; } __global__ void softmax_kernel(int64_t nrow, int64_t ncol, const float *input_data, float *output_data) { // two dimensional thread blocks. 
int y = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; if (y >= nrow) { return; } // advance to the y-th row of the input and output input_data += y * ncol; output_data += y * ncol; // find the max of the row. float maxval = *input_data; for (int x = 1; x < ncol; ++x) { maxval = max(maxval, input_data[x]); } // Subtract the per-row max before exponentiating so exp() cannot overflow; // softmax is invariant under this shift, so the result is unchanged. float sum = 0; for (int x = 0; x < ncol; ++x) { sum += exp(input_data[x] - maxval); } // Compute per-row softmax. for (int x = 0; x < ncol; ++x) { output_data[x] = exp(input_data[x] - maxval) / sum; } } int DLGpuSoftmax(const DLArrayHandle input, DLArrayHandle output) { /* TODO: Your code here */ assert(input->ndim == 2); assert(output->ndim == 2); int64_t nrow = input->shape[0]; int64_t ncol = input->shape[1]; float *input_data = (float *) input->data; float *output_data = (float *) output->data; dim3 threads; if (nrow <= 1024) { threads.x = nrow; } else { threads.x = 1024; threads.y = (nrow + 1023) / 1024; } // NOTE: a block is limited to 1024 threads in total, so the threads.y fallback // above only launches successfully for nrow <= 1024. softmax_kernel << < 1, threads >> > (nrow, ncol, input_data, output_data); return 0; } int DLGpuSoftmaxCrossEntropy(const DLArrayHandle input_a, const DLArrayHandle input_b, DLArrayHandle output) { assert(input_a->ndim == 2); assert(input_b->ndim == 2); assert(output->ndim == 1); assert( input_a->shape[0] == input_b->shape[0] && input_a->shape[1] == input_b->shape[1]); int nrow = input_a->shape[0]; // Maximum x- or y-dimension of a block = 1024 // But we need 'nrow' shared memory, and max shared memory is 48KB. // Conservatively allow max 16KB shared memory. assert(nrow <= 1024 * 4); int ncol = input_a->shape[1]; const float *input_data_a = (const float *) input_a->data; const float *input_data_b = (const float *) input_b->data; float *output_data = (float *) output->data; dim3 threads; if (nrow <= 1024) { threads.x = nrow; } else { threads.x = 1024; threads.y = (nrow + 1023) / 1024; } // 1 block, each block with 'threads' number of threads with 'nrow' shared // memory size matrix_softmax_cross_entropy_kernel << < 1, threads, nrow * sizeof(float) >> > ( nrow, ncol, input_data_a, input_data_b, output_data); return 0; }
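// Editor's note: matrix_softmax_cross_entropy_kernel is launched above but its
// definition lies outside this excerpt. A minimal sketch of what such a kernel
// could look like (hypothetical name; assumes the mean cross-entropy over rows is
// reduced into output[0] via the dynamic shared-memory buffer sized at the launch):
__global__ void matrix_softmax_cross_entropy_kernel_sketch(int nrow, int ncol,
                                                           const float *input_a,
                                                           const float *input_b,
                                                           float *output) {
    // Dynamic shared memory; nrow * sizeof(float) is supplied at the launch site.
    extern __shared__ float loss_per_row[];
    int y = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
    if (y >= nrow) return;
    input_a += y * ncol;
    input_b += y * ncol;
    // Numerically stable softmax: subtract the per-row max before exponentiating.
    float maxval = *input_a;
    for (int x = 1; x < ncol; ++x) maxval = max(maxval, input_a[x]);
    float sum = 0;
    for (int x = 0; x < ncol; ++x) sum += exp(input_a[x] - maxval);
    // Cross-entropy of this row's softmax against the labels in input_b.
    float loss = 0;
    for (int x = 0; x < ncol; ++x)
        loss -= input_b[x] * log(exp(input_a[x] - maxval) / sum);
    loss_per_row[y] = loss;
    __syncthreads();
    // One thread reduces to the mean; adequate for the single-block launch above.
    if (threadIdx.x == 0 && threadIdx.y == 0) {
        float mean_loss = 0;
        for (int i = 0; i < nrow; ++i) mean_loss += loss_per_row[i];
        output[0] = mean_loss / nrow;
    }
}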
7c3a81cc044be8279ebcdd9689bdcc1b7c19e14e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_yvel_minus_2_top; int xdim0_update_halo_kernel2_yvel_minus_2_top_h = -1; __constant__ int ydim0_update_halo_kernel2_yvel_minus_2_top; int ydim0_update_halo_kernel2_yvel_minus_2_top_h = -1; __constant__ int xdim1_update_halo_kernel2_yvel_minus_2_top; int xdim1_update_halo_kernel2_yvel_minus_2_top_h = -1; __constant__ int ydim1_update_halo_kernel2_yvel_minus_2_top; int ydim1_update_halo_kernel2_yvel_minus_2_top_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_yvel_minus_2_top*(y)+xdim0_update_halo_kernel2_yvel_minus_2_top*ydim0_update_halo_kernel2_yvel_minus_2_top*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_yvel_minus_2_top*(y)+xdim1_update_halo_kernel2_yvel_minus_2_top*ydim1_update_halo_kernel2_yvel_minus_2_top*(z)) //user function __device__ inline void update_halo_kernel2_yvel_minus_2_top_gpu(double *yvel0, double *yvel1, const int* fields) { if(fields[FIELD_YVEL0] == 1) yvel0[OPS_ACC0(0,0,0)] = -yvel0[OPS_ACC0(0,-2,0)]; if(fields[FIELD_YVEL1] == 1) yvel1[OPS_ACC1(0,0,0)] = -yvel1[OPS_ACC1(0,-2,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_yvel_minus_2_top( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel2_yvel_minus_2_top + idx_z * 1*1 * xdim0_update_halo_kernel2_yvel_minus_2_top * ydim0_update_halo_kernel2_yvel_minus_2_top; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel2_yvel_minus_2_top + idx_z * 1*1 * xdim1_update_halo_kernel2_yvel_minus_2_top * ydim1_update_halo_kernel2_yvel_minus_2_top; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_yvel_minus_2_top_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel2_yvel_minus_2_top(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel2_yvel_minus_2_top_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,38)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(38,"update_halo_kernel2_yvel_minus_2_top"); OPS_kernels[38].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int 
n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel2_yvel_minus_2_top_h || ydim0 != ydim0_update_halo_kernel2_yvel_minus_2_top_h || xdim1 != xdim1_update_halo_kernel2_yvel_minus_2_top_h || ydim1 != ydim1_update_halo_kernel2_yvel_minus_2_top_h) { hipMemcpyToSymbol( xdim0_update_halo_kernel2_yvel_minus_2_top, &xdim0, sizeof(int) ); xdim0_update_halo_kernel2_yvel_minus_2_top_h = xdim0; hipMemcpyToSymbol( ydim0_update_halo_kernel2_yvel_minus_2_top, &ydim0, sizeof(int) ); ydim0_update_halo_kernel2_yvel_minus_2_top_h = ydim0; hipMemcpyToSymbol( xdim1_update_halo_kernel2_yvel_minus_2_top, &xdim1, sizeof(int) ); xdim1_update_halo_kernel2_yvel_minus_2_top_h = xdim1; hipMemcpyToSymbol( ydim1_update_halo_kernel2_yvel_minus_2_top, &ydim1, sizeof(int) ); ydim1_update_halo_kernel2_yvel_minus_2_top_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[38].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_update_halo_kernel2_yvel_minus_2_top), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[38].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[38].mpi_time += t2-t1; OPS_kernels[38].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[38].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void 
ops_par_loop_update_halo_kernel2_yvel_minus_2_top(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 38; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 38; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel2_yvel_minus_2_top_execute; if (OPS_diags > 1) { ops_timing_realloc(38,"update_halo_kernel2_yvel_minus_2_top"); } ops_enqueue_kernel(desc); } #endif
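// Editor's note: stripped of the OPS plumbing, the generated kernel above performs a
// mirrored, negated copy across the top boundary of a 3-D array stored x-fastest.
// A standalone sketch of the same access pattern (hypothetical names; the halo row y
// is assumed valid, i.e. 2 <= y < ny, mirroring the (0,-2,0) stencil offset):
__global__ void mirror_top_halo_sketch(double *f, int nx, int ny, int nz, int y) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int z = blockIdx.z * blockDim.z + threadIdx.z;
    if (x < nx && z < nz) {
        // flat index x + nx*y + nx*ny*z, matching the OPS_ACC macros above
        size_t at     = x + (size_t)nx * y       + (size_t)nx * ny * z;
        size_t mirror = x + (size_t)nx * (y - 2) + (size_t)nx * ny * z;
        f[at] = -f[mirror];  // y-velocity flips sign across the reflective boundary
    }
}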
7c3a81cc044be8279ebcdd9689bdcc1b7c19e14e.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_yvel_minus_2_top; int xdim0_update_halo_kernel2_yvel_minus_2_top_h = -1; __constant__ int ydim0_update_halo_kernel2_yvel_minus_2_top; int ydim0_update_halo_kernel2_yvel_minus_2_top_h = -1; __constant__ int xdim1_update_halo_kernel2_yvel_minus_2_top; int xdim1_update_halo_kernel2_yvel_minus_2_top_h = -1; __constant__ int ydim1_update_halo_kernel2_yvel_minus_2_top; int ydim1_update_halo_kernel2_yvel_minus_2_top_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_yvel_minus_2_top*(y)+xdim0_update_halo_kernel2_yvel_minus_2_top*ydim0_update_halo_kernel2_yvel_minus_2_top*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_yvel_minus_2_top*(y)+xdim1_update_halo_kernel2_yvel_minus_2_top*ydim1_update_halo_kernel2_yvel_minus_2_top*(z)) //user function __device__ inline void update_halo_kernel2_yvel_minus_2_top_gpu(double *yvel0, double *yvel1, const int* fields) { if(fields[FIELD_YVEL0] == 1) yvel0[OPS_ACC0(0,0,0)] = -yvel0[OPS_ACC0(0,-2,0)]; if(fields[FIELD_YVEL1] == 1) yvel1[OPS_ACC1(0,0,0)] = -yvel1[OPS_ACC1(0,-2,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_yvel_minus_2_top( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel2_yvel_minus_2_top + idx_z * 1*1 * xdim0_update_halo_kernel2_yvel_minus_2_top * ydim0_update_halo_kernel2_yvel_minus_2_top; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel2_yvel_minus_2_top + idx_z * 1*1 * xdim1_update_halo_kernel2_yvel_minus_2_top * ydim1_update_halo_kernel2_yvel_minus_2_top; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_yvel_minus_2_top_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel2_yvel_minus_2_top(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel2_yvel_minus_2_top_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,38)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(38,"update_halo_kernel2_yvel_minus_2_top"); OPS_kernels[38].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = 
MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel2_yvel_minus_2_top_h || ydim0 != ydim0_update_halo_kernel2_yvel_minus_2_top_h || xdim1 != xdim1_update_halo_kernel2_yvel_minus_2_top_h || ydim1 != ydim1_update_halo_kernel2_yvel_minus_2_top_h) { cudaMemcpyToSymbol( xdim0_update_halo_kernel2_yvel_minus_2_top, &xdim0, sizeof(int) ); xdim0_update_halo_kernel2_yvel_minus_2_top_h = xdim0; cudaMemcpyToSymbol( ydim0_update_halo_kernel2_yvel_minus_2_top, &ydim0, sizeof(int) ); ydim0_update_halo_kernel2_yvel_minus_2_top_h = ydim0; cudaMemcpyToSymbol( xdim1_update_halo_kernel2_yvel_minus_2_top, &xdim1, sizeof(int) ); xdim1_update_halo_kernel2_yvel_minus_2_top_h = xdim1; cudaMemcpyToSymbol( ydim1_update_halo_kernel2_yvel_minus_2_top, &ydim1, sizeof(int) ); ydim1_update_halo_kernel2_yvel_minus_2_top_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[38].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_update_halo_kernel2_yvel_minus_2_top<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[38].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[38].mpi_time += t2-t1; OPS_kernels[38].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[38].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel2_yvel_minus_2_top(char const *name, ops_block block, int dim, int* range, ops_arg arg0, 
ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 38; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 38; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel2_yvel_minus_2_top_execute; if (OPS_diags > 1) { ops_timing_realloc(38,"update_halo_kernel2_yvel_minus_2_top"); } ops_enqueue_kernel(desc); } #endif
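// Editor's note: the host stub above shadows each __constant__ dim with a *_h host
// copy so cudaMemcpyToSymbol only runs when a size actually changes between calls.
// The caching pattern in isolation (hypothetical names; a sketch rather than the
// generator's exact logic):
__constant__ int d_width;     // device-side constant read by kernels
static int d_width_h = -1;    // host-side shadow of the last uploaded value

static void set_width(int w) {
    if (w != d_width_h) {     // skip the upload when the value is unchanged
        cudaMemcpyToSymbol(d_width, &w, sizeof(int));
        d_width_h = w;
    }
}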
32a64fc71be4d7693d04717dd23cb46c9157bd78.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <string> #include <math.h> #include <stdio.h> #define numBlocks 12 #define numThreads 32 struct RSA_KEY { unsigned long p; // selected prime 1 unsigned long q; // selected prime 2 unsigned long n; // public - the modulus unsigned long e; // public - for encryption unsigned long d; // private - for decryption }; // Function prototypes RSA_KEY generate_RSA_key(unsigned long p, unsigned long q); void print_RSA_key(RSA_KEY in_key); void RSA_encode( char *input, size_t input_size, unsigned long long *output, size_t output_size, unsigned long e, unsigned long n); void RSA_decode( unsigned long long *input, size_t input_size, char *output, size_t output_size, unsigned long d, unsigned long n); int gcd(int a, int b); int modulo(int a, int b, int n); __device__ int is_prime(unsigned long input); // RSA Cracking Kernel __global__ void findPrime(unsigned long n, unsigned long roundedN) { // Round the input modulus up to the next power of 2 unsigned long rangeRounded = 2 << roundedN; // Sanity dictates that both primes should be < half the modulus unsigned long rangeTotal = rangeRounded / 2; // Determine min & max range for this thread unsigned long index = blockIdx.x * numThreads + threadIdx.x; unsigned long rangeLow = rangeTotal / (numBlocks * numThreads) * index; unsigned long rangeHigh = rangeTotal / (numBlocks * numThreads) * (index + 1) - 1; //printf("Thread %d reporting in N:%d | %d to %d\n", index, n, rangeLow, rangeHigh); // Loop through range and search for prime factors (start at 2: 0 and 1 are not prime, and n % 0 is undefined) unsigned long output = 0; for (unsigned long myindex = (rangeLow < 2 ? 2 : rangeLow); myindex < rangeHigh; myindex++) { if (is_prime(myindex)) { if (n % myindex == 0) { output = myindex; printf("prime: %lu\n", myindex); } } } // Debug Print if (output != 0) printf("B:%u T:%u I:%lu Range: %8lu to %8lu of %8lu RESULT: %lu\n", blockIdx.x, threadIdx.x, index, rangeLow, rangeHigh, rangeTotal, output); } int main() { // Message to encode char secret_message[] = "The quick brown fox jumped over the lazy dog."; printf("Message: %s\n\n",secret_message); // Generate public & private key printf("Generating key...\n"); RSA_KEY my_key; unsigned long prime1 = 157; unsigned long prime2 = 199; my_key = generate_RSA_key(prime1, prime2); print_RSA_key(my_key); // Encode message using public key printf("Encrypting message...\n"); unsigned long long ciphertext[50]; RSA_encode(secret_message, sizeof secret_message, ciphertext, sizeof ciphertext, my_key.e, my_key.n); // Print the ciphertext printf("Ciphertext : "); for (int i = 0; i < (int)sizeof(secret_message); i++) { if (i % 10 == 0) { printf("\n"); } printf("%6llu ", ciphertext[i]); } // Decrypt message using private key printf("\n\nDecrypting using private key...\n"); char decrypt_message[50]; RSA_decode(ciphertext, sizeof ciphertext, decrypt_message, sizeof decrypt_message, my_key.d, my_key.n); printf("Decrypted message: %s\n\n", decrypt_message); // Attempt to brute-force find the private key hipLaunchKernelGGL(( findPrime) , dim3(numBlocks), dim3(numThreads) , 0, 0, my_key.n, log2(my_key.n)); hipDeviceSynchronize(); // Error checking hipError_t err = hipGetLastError(); if (err != hipSuccess) printf("Error: %s\n", hipGetErrorString(err)); //printf("%f\n", 31243 % 10239); // Decrypt message using cracked key printf("\nEnd Program\n"); } RSA_KEY generate_RSA_key(unsigned long p, unsigned long q) { RSA_KEY ret_str; //ret_str.p = 157; // TODO: hardcoded for now - needs random generation //ret_str.q = 199;
// TODO: hardcoded for now - needs random generation ret_str.p = p; ret_str.q = q; // Calculate modulus ret_str.n = ret_str.p * ret_str.q; // Calculate totient int totient = (ret_str.p - 1) * (ret_str.q - 1); printf("Totient: %d\n", totient); // Calculate public key exponent 'e' int temp_e = 0; while (true) { temp_e = rand() % (totient - 2) + 2; // random int with 1 < e < totient if (gcd(temp_e, totient) == 1) { break; } } ret_str.e = temp_e; // Calculate private key exponent 'd' int temp_d = 0; int diff; while (true) { temp_d++; diff = (temp_d * ret_str.e) - 1; if(diff % totient == 0) { break; } } ret_str.d = temp_d; return ret_str; } void print_RSA_key(RSA_KEY in_key) { printf("RSA Key: p = %lu\n", in_key.p); printf("RSA Key: q = %lu\n", in_key.q); printf("RSA Key: n = %lu\n", in_key.n); printf("RSA Key: e = %lu\n", in_key.e); printf("RSA Key: d = %lu\n", in_key.d); printf("\n"); } // Greatest Common Divisor function // Courtesy of: https://codereview.stackexchange.com/a/39110 int gcd(int a, int b) { int x; while (b) { x = a % b; a = b; b = x; } return a; } // RSA Message encoder void RSA_encode( char *input, size_t input_size, unsigned long long *output, size_t output_size, unsigned long e, unsigned long n) { unsigned long long m,c; //printf("e: %d n: %d\n", e, n); // Convert message string to integer for (int i = 0; i < input_size; i++) { m = (int)input[i]; //printf("m: %d ", m); //p = pow(m, e); printf("p: %d\n", p); //c = p % n; c = modulo(m, e, n); //printf("c: %d\n", c); output[i] = c; } } // RSA Message decoder void RSA_decode( unsigned long long *input, size_t input_size, char *output, size_t output_size, unsigned long d, unsigned long n) { for (int i = 0; i < output_size; i++) { output[i] = modulo(input[i], d, n); } } // Modular exponentiation for massive powers (square-and-multiply) // Courtesy of: https://stackoverflow.com/a/36398956 int modulo(int a, int b, int n) { long long x = 1, y = a; while (b > 0) { if (b % 2 == 1) { x = (x*y) % n; } y = (y*y) % n; // squaring the base b /= 2; } return x%n; } // Test if a number is prime __device__ int is_prime(unsigned long input) { if (input < 2) return 0; // 0 and 1 are not prime for (unsigned long k = 2; k < input; k++) { if (input % k == 0) { return 0; } } return 1; }
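// Editor's note: a small host-side sanity check one could call after generate_RSA_key(),
// verifying the key invariant e*d = 1 (mod totient) and round-tripping one byte through
// the existing modulo() helper (the values here are small enough for its int parameters).
// Hypothetical helper, not part of the original program:
void check_RSA_key(RSA_KEY key) {
    long long totient = (long long)(key.p - 1) * (key.q - 1);
    if (((long long)key.e * key.d) % totient != 1)
        printf("key check FAILED: e*d mod totient = %lld\n",
               ((long long)key.e * key.d) % totient);
    int m = 'A';
    int c = modulo(m, key.e, key.n);  // encrypt one byte with the public key
    int r = modulo(c, key.d, key.n);  // decrypt it again with the private key
    printf("key check: '%c' -> %d -> '%c'\n", m, c, r);
}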
32a64fc71be4d7693d04717dd23cb46c9157bd78.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <string> #include <math.h> #include <stdio.h> #define numBlocks 12 #define numThreads 32 struct RSA_KEY { unsigned long p; // selected prime 1 unsigned long q; // selected prime 2 unsigned long n; // public - the modulus unsigned long e; // public - for encryption unsigned long d; // private - for decryption }; // Function prototypes RSA_KEY generate_RSA_key(unsigned long p, unsigned long q); void print_RSA_key(RSA_KEY in_key); void RSA_encode( char *input, size_t input_size, unsigned long long *output, size_t output_size, unsigned long e, unsigned long n); void RSA_decode( unsigned long long *input, size_t input_size, char *output, size_t output_size, unsigned long d, unsigned long n); int gcd(int a, int b); int modulo(int a, int b, int n); __device__ int is_prime(unsigned long input); // RSA Cracking Kernel __global__ void findPrime(unsigned long n, unsigned long roundedN) { // Round the input modulus to nearest power of 2 unsigned long rangeRounded = 2 << roundedN; // Sanity dictates that both primes should be < half the modulus unsigned long rangeTotal = rangeRounded / 2; // Determine min & max range for this thread unsigned long index = blockIdx.x * numThreads + threadIdx.x; unsigned long rangeLow = rangeTotal / (numBlocks * numThreads) * index; unsigned long rangeHigh = rangeTotal / (numBlocks * numThreads) * (index + 1) - 1; //printf("Thread %d reporting in N:%d | %d to %d\n", index, n, rangeLow, rangeHigh); // Loop through range and search for primes unsigned long output = 0; for (unsigned long myindex = rangeLow; myindex < rangeHigh; myindex++) { if (is_prime(myindex)) { if (n % myindex == 0) { output = myindex; printf("prime: %d\n", myindex); } } } // Debug Print if (output != 0) printf("B:%d T:%d I:%d Range: %8d to %8d of %8d RESULT: %d\n", blockIdx.x, threadIdx.x, index, rangeLow, rangeHigh, rangeTotal, output); } int main() { // Message to encode char secret_message[] = "The quick brown fox jumped over the lazy dog."; printf("Message: %s\n\n",secret_message); // Generate public & private key printf("Generating key...\n"); RSA_KEY my_key; unsigned long prime1 = 157; unsigned long prime2 = 199; my_key = generate_RSA_key(prime1, prime2); print_RSA_key(my_key); // Encode message using public key printf("Encrypting message...\n"); unsigned long long ciphertext[50]; RSA_encode(secret_message, sizeof secret_message, ciphertext, sizeof ciphertext, my_key.e, my_key.n); // Print the ciphertext printf("Ciphertext : "); for (int i = 0; i < sizeof(secret_message); i++) { if (i % 10 == 0) { printf("\n"); } printf("%6d ", ciphertext[i]); } // Decrypt message using private key printf("\n\nDecrypting using private key...\n"); char decrypt_message[50]; RSA_decode(ciphertext, sizeof ciphertext, decrypt_message, sizeof decrypt_message, my_key.d, my_key.n); printf("Decrypted message: %s\n\n", decrypt_message); // Attempt to bruteforce find the private key findPrime <<< numBlocks, numThreads >>> (my_key.n, log2(my_key.n)); cudaDeviceSynchronize(); // Error checking cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(err)); //printf("%f\n", 31243 % 10239); // Decrypt message using cracked key printf("\nEnd Program\n"); } RSA_KEY generate_RSA_key(unsigned long p, unsigned long q) { RSA_KEY ret_str; //ret_str.p = 157; // TODO: hardcoded for now - needs random generation //ret_str.q = 199; // TODO: hardcoded for now - needs random generation ret_str.p = p; ret_str.q = q; // 
Calculate modulus ret_str.n = ret_str.p * ret_str.q; // Calculate totient int totient = (ret_str.p - 1) * (ret_str.q - 1); printf("Totient: %d\n", totient); // Calculate public key exponent 'e' int temp_e = 0; while (true) { temp_e = rand() % (totient - 2) + 2; // random int with 1 < e < totient if (gcd(temp_e, totient) == 1) { break; } } ret_str.e = temp_e; // Calculate private key exponent 'd' int temp_d = 0; int diff; while (true) { temp_d++; diff = (temp_d * ret_str.e) - 1; if(diff % totient == 0) { break; } } ret_str.d = temp_d; return ret_str; } void print_RSA_key(RSA_KEY in_key) { printf("RSA Key: p = %lu\n", in_key.p); printf("RSA Key: q = %lu\n", in_key.q); printf("RSA Key: n = %lu\n", in_key.n); printf("RSA Key: e = %lu\n", in_key.e); printf("RSA Key: d = %lu\n", in_key.d); printf("\n"); } // Greatest Common Divisor function // Courtesy of: https://codereview.stackexchange.com/a/39110 int gcd(int a, int b) { int x; while (b) { x = a % b; a = b; b = x; } return a; } // RSA Message encoder void RSA_encode( char *input, size_t input_size, unsigned long long *output, size_t output_size, unsigned long e, unsigned long n) { unsigned long long m,c; //printf("e: %d n: %d\n", e, n); // Convert message string to integer for (int i = 0; i < input_size; i++) { m = (int)input[i]; //printf("m: %d ", m); //p = pow(m, e); printf("p: %d\n", p); //c = p % n; c = modulo(m, e, n); //printf("c: %d\n", c); output[i] = c; } } // RSA Message decoder void RSA_decode( unsigned long long *input, size_t input_size, char *output, size_t output_size, unsigned long d, unsigned long n) { for (int i = 0; i < output_size; i++) { output[i] = modulo(input[i], d, n); } } // Modular exponentiation for massive powers (square-and-multiply) // Courtesy of: https://stackoverflow.com/a/36398956 int modulo(int a, int b, int n) { long long x = 1, y = a; while (b > 0) { if (b % 2 == 1) { x = (x*y) % n; } y = (y*y) % n; // squaring the base b /= 2; } return x%n; } // Test if a number is prime __device__ int is_prime(unsigned long input) { if (input < 2) return 0; // 0 and 1 are not prime for (unsigned long k = 2; k < input; k++) { if (input % k == 0) { return 0; } } return 1; }
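// Editor's note: is_prime() above does O(n) trial division per candidate, which dominates
// the cracking kernel's runtime. A sketch of a sqrt-bounded variant (hypothetical name)
// that only tests divisors while k*k <= input:
__device__ int is_prime_fast(unsigned long input) {
    if (input < 2) return 0;                        // 0 and 1 are not prime
    for (unsigned long k = 2; k * k <= input; k++) {
        if (input % k == 0) return 0;               // found a divisor
    }
    return 1;
}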
1b21b11ebff902440cab03dab81babda9cad3587.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define STRIDE 2 __global__ void kernel3(int m, int n, int k, double *d_A, double *d_B, double *d_C){ int i = (blockIdx.y * blockDim.y + threadIdx.y) * STRIDE; int j = blockIdx.x * blockDim.x + threadIdx.x; int sum1=0; for(int s1 = 0; s1 < STRIDE; s1++){ sum1 = i + s1; if (sum1 < m && j < n){ d_C[sum1*n + j] = 0.0; for(int s = 0; s < k; s++){ d_C[sum1*n + j] += d_A[sum1*k + s] * d_B[s*n + j]; } } } } extern "C" { void matmult_gpu3(int m, int n, int k, double *A, double *B, double *C) { double *d_A, *d_B, *d_C; //variable on device int size_matrix_A = m * k * sizeof(double); hipMalloc((void**)&d_A, size_matrix_A); // allocate memory on GPU int size_matrix_B = k * n * sizeof(double); hipMalloc((void**)&d_B, size_matrix_B); int size_matrix_C = m * n * sizeof(double); hipMalloc((void**)&d_C, size_matrix_C); //copy A and B to GPU hipMemcpy(d_A, A, size_matrix_A, hipMemcpyHostToDevice); hipMemcpy(d_B, B, size_matrix_B, hipMemcpyHostToDevice); dim3 dimBlock(16,16,1); dim3 dimGrid((n-1)/dimBlock.x+1, ((m+STRIDE-1)/STRIDE-1)/dimBlock.y+1); // grid.x covers the n columns (j); grid.y covers ceil(m/STRIDE) strided rows (i) hipLaunchKernelGGL(( kernel3), dim3(dimGrid),dim3(dimBlock), 0, 0, m, n, k, d_A, d_B, d_C); hipDeviceSynchronize(); //transfer C back to CPU hipMemcpy(C, d_C, size_matrix_C, hipMemcpyDeviceToHost); hipFree(d_A); hipFree(d_B); hipFree(d_C); } }
1b21b11ebff902440cab03dab81babda9cad3587.cu
#define STRIDE 2 __global__ void kernel3(int m, int n, int k, double *d_A, double *d_B, double *d_C){ int i = (blockIdx.y * blockDim.y + threadIdx.y) * STRIDE; int j = blockIdx.x * blockDim.x + threadIdx.x; int sum1=0; for(int s1 = 0; s1 < STRIDE; s1++){ sum1 = i + s1; if (sum1 < m && j < n){ d_C[sum1*n + j] = 0.0; for(int s = 0; s < k; s++){ d_C[sum1*n + j] += d_A[sum1*k + s] * d_B[s*n + j]; } } } } extern "C" { void matmult_gpu3(int m, int n, int k, double *A, double *B, double *C) { double *d_A, *d_B, *d_C; //variable on device int size_matrix_A = m * k * sizeof(double); cudaMalloc((void**)&d_A, size_matrix_A); // allocate memory on GPU int size_matrix_B = k * n * sizeof(double); cudaMalloc((void**)&d_B, size_matrix_B); int size_matrix_C = m * n * sizeof(double); cudaMalloc((void**)&d_C, size_matrix_C); //copy A and B to GPU cudaMemcpy(d_A, A, size_matrix_A, cudaMemcpyHostToDevice); cudaMemcpy(d_B, B, size_matrix_B, cudaMemcpyHostToDevice); dim3 dimBlock(16,16,1); dim3 dimGrid((n-1)/dimBlock.x+1, ((m+STRIDE-1)/STRIDE-1)/dimBlock.y+1); // grid.x covers the n columns (j); grid.y covers ceil(m/STRIDE) strided rows (i) kernel3<<<dimGrid,dimBlock>>>(m, n, k, d_A, d_B, d_C); cudaDeviceSynchronize(); //transfer C back to CPU cudaMemcpy(C, d_C, size_matrix_C, cudaMemcpyDeviceToHost); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); } }
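// Editor's note: a minimal host-side driver one could use to exercise matmult_gpu3(),
// comparing a deliberately non-square product against a CPU triple loop. Hypothetical
// test harness, not part of the original file:
#include <cstdio>
#include <cstdlib>
#include <cmath>
extern "C" void matmult_gpu3(int m, int n, int k, double *A, double *B, double *C);

int main() {
    const int m = 33, n = 17, k = 9;  // non-square, not multiples of the 16x16 block
    double *A = (double *)malloc(m * k * sizeof(double));
    double *B = (double *)malloc(k * n * sizeof(double));
    double *C = (double *)malloc(m * n * sizeof(double));
    for (int i = 0; i < m * k; i++) A[i] = rand() / (double)RAND_MAX;
    for (int i = 0; i < k * n; i++) B[i] = rand() / (double)RAND_MAX;
    matmult_gpu3(m, n, k, A, B, C);
    // reference product on the CPU
    double maxerr = 0.0;
    for (int i = 0; i < m; i++)
        for (int j = 0; j < n; j++) {
            double s = 0.0;
            for (int t = 0; t < k; t++) s += A[i * k + t] * B[t * n + j];
            maxerr = fmax(maxerr, fabs(s - C[i * n + j]));
        }
    printf("max abs error: %g\n", maxerr);  // expect near machine precision
    free(A); free(B); free(C);
    return 0;
}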
7325765420f18dd852643ba957d9d7cc5af0cdfa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // nvcc hello-world.cu -L /usr/local/cuda/lib -lcudart -o hello-world #include <cstdlib> #include <cstdio> #include "Matching.h" #include "const.h" //#include "cam.h" #include "WindowMatching.h" #include "timer.h" #include <cmath> using namespace cv; static void HandleError( hipError_t err, const char *file, int line ) { if (err != hipSuccess) { printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) __device__ float weighted[WINDOW_SIZE*WINDOW_SIZE]; __device__ float wa0[IM_SIZE]; __device__ float wa1[IM_SIZE]; __device__ float wac0[IMAGE_WIDTH*IMAGE_HEIGHT]; __device__ float wac1[IMAGE_WIDTH*IMAGE_HEIGHT]; //__device__ float ep0[EP_SIZE]; __device__ float* pos; __global__ void weightedAverage(uchar const *im0, uchar const *im1) { // calculate weighted if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x < WINDOW_SIZE && threadIdx.y < WINDOW_SIZE) { int a = WINDOW_SIZE -1; float cos_x = cos(M_PI * (static_cast<int>(threadIdx.x) - WINDOW_SIZE/2) / a); float cos_y = cos(M_PI * (static_cast<int>(threadIdx.y) - WINDOW_SIZE/2) / a); weighted[threadIdx.y*WINDOW_SIZE+threadIdx.x] = cos_x*cos_x*cos_y*cos_y; //printf("threadIdx.y %d, threadIdx.x %d: weight=%f \n", threadIdx.y, threadIdx.x, weighted[threadIdx.y*WINDOW_SIZE+threadIdx.x]); } __syncthreads(); __shared__ uchar cache[(NUM_THREADS+WINDOW_SIZE-1)*(NUM_THREADS+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL]; // load image values from global memory to shared memory uchar const *img = (blockIdx.z == 0) ? im0 : im1; for (int start_y = 0; start_y < blockDim.y+WINDOW_SIZE; start_y += blockDim.y) { for (int start_x = 0; start_x < blockDim.x+WINDOW_SIZE; start_x += (blockDim.x/NUM_COLOUR_CHANNEL)) { int maxThreadIdx_x = NUM_THREADS/NUM_COLOUR_CHANNEL * NUM_COLOUR_CHANNEL; if (threadIdx.x < maxThreadIdx_x && start_x*NUM_COLOUR_CHANNEL+threadIdx.x < (blockDim.x+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL && start_y+threadIdx.y < blockDim.y+WINDOW_SIZE-1) { int idx_y = blockIdx.y * blockDim.y + (start_y + threadIdx.y) - WINDOW_SIZE/2; int idx_x = blockIdx.x * blockDim.x * NUM_COLOUR_CHANNEL + (start_x*NUM_COLOUR_CHANNEL+threadIdx.x) - WINDOW_SIZE/2*NUM_COLOUR_CHANNEL; cache[(start_y+threadIdx.y)*(blockDim.x+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL + (start_x*NUM_COLOUR_CHANNEL+threadIdx.x)] = (idx_y >= 0 && idx_y < IMAGE_HEIGHT && idx_x >= 0 && idx_x < IMAGE_WIDTH*NUM_COLOUR_CHANNEL) ? img[idx_y*IMAGE_WIDTH*NUM_COLOUR_CHANNEL+idx_x] : 0; } } } __syncthreads(); // calculate weighted average //* int idx_y = blockIdx.y*blockDim.y+threadIdx.y; int idx_x = blockIdx.x*blockDim.x+threadIdx.x; if (idx_y < IMAGE_HEIGHT && idx_x < IMAGE_WIDTH) { float tmp_r = 0.0; float tmp_g = 0.0; float tmp_b = 0.0; for (int i = 0; i < WINDOW_SIZE; ++i) for (int j = 0; j < WINDOW_SIZE; ++j) { float weight = weighted[i*WINDOW_SIZE+j]; int cache_idx_base = (threadIdx.y+i)*(blockDim.x+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL + (threadIdx.x+j)*NUM_COLOUR_CHANNEL; tmp_r += (weight * cache[cache_idx_base]); tmp_g += (weight * cache[cache_idx_base+1]); tmp_b += (weight * cache[cache_idx_base+2]); } tmp_r /= 9; tmp_g /= 9; tmp_b /= 9; //* if (blockIdx.x==0 && blockIdx.y==0&&blockIdx.z==0&&threadIdx.x==3&&threadIdx.y==3) { printf("[%f, %f, %f] ", tmp_r, tmp_g, tmp_b); }//*/ int write_to_idx = idx_y*IMAGE_WIDTH*NUM_COLOUR_CHANNEL + idx_x*NUM_COLOUR_CHANNEL; float *wa = (blockIdx.z == 0) ? 
wa0 : wa1; wa[write_to_idx] = tmp_r; wa[write_to_idx+1] = tmp_g; wa[write_to_idx+2] = tmp_b;//*/ //* if (blockIdx.x==0 && blockIdx.y==0&&blockIdx.z==0&&threadIdx.x==3&&threadIdx.y==3) { printf("[%f, %f, %f] ", wa[write_to_idx], wa[write_to_idx+1], wa[write_to_idx+2]); }//*/ } } //* __global__ void wac_gpu(uchar const *im0, uchar const *im1) { __shared__ uchar cache[(NUM_THREADS+WINDOW_SIZE-1)*(NUM_THREADS+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL]; // load image values from global memory to shared memory uchar const *img = (blockIdx.z == 0) ? im0 : im1; for (int start_y = 0; start_y < blockDim.y+WINDOW_SIZE; start_y += blockDim.y) { for (int start_x = 0; start_x < blockDim.x+WINDOW_SIZE; start_x += (blockDim.x/NUM_COLOUR_CHANNEL)) { int maxThreadIdx_x = NUM_THREADS/NUM_COLOUR_CHANNEL * NUM_COLOUR_CHANNEL; if (threadIdx.x < maxThreadIdx_x && start_x*NUM_COLOUR_CHANNEL+threadIdx.x < (blockDim.x+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL && start_y+threadIdx.y < blockDim.y+WINDOW_SIZE-1) { int idx_y = blockIdx.y * blockDim.y + (start_y + threadIdx.y) - WINDOW_SIZE/2; int idx_x = blockIdx.x * blockDim.x * NUM_COLOUR_CHANNEL + (start_x*NUM_COLOUR_CHANNEL+threadIdx.x) - WINDOW_SIZE/2*NUM_COLOUR_CHANNEL; cache[(start_y+threadIdx.y)*(blockDim.x+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL + (start_x*NUM_COLOUR_CHANNEL+threadIdx.x)] = (idx_y >= 0 && idx_y < IMAGE_HEIGHT && idx_x >= 0 && idx_x < IMAGE_WIDTH*NUM_COLOUR_CHANNEL) ? img[idx_y*IMAGE_WIDTH*NUM_COLOUR_CHANNEL+idx_x] : 0; } } } __syncthreads(); /* if (blockIdx.x==0 && blockIdx.y==0&&threadIdx.x==0&&threadIdx.y==0) { for (int i=0; i<38; ++i) { for (int j=0; j<38; ++j) printf("%f ", wa0[i*IMAGE_WIDTH*NUM_COLOUR_CHANNEL+j*NUM_COLOUR_CHANNEL]); printf("\n"); } }//*/ // calculate weighted average correlation //* int idx_y = blockIdx.y*blockDim.y+threadIdx.y; int idx_x = blockIdx.x*blockDim.x+threadIdx.x; if (idx_y < IMAGE_HEIGHT && idx_x < IMAGE_WIDTH) { float tmp = 0.0; int wa_idx_base = idx_y*IMAGE_WIDTH*NUM_COLOUR_CHANNEL + idx_x*NUM_COLOUR_CHANNEL; for (int i = 0; i < WINDOW_SIZE; ++i) for (int j = 0; j < WINDOW_SIZE; ++j) { int cache_idx_base = (threadIdx.y+i)*(blockDim.x+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL + (threadIdx.x+j)*NUM_COLOUR_CHANNEL; float *wa = (blockIdx.z == 0) ? wa0 : wa1; tmp += (weighted[i*WINDOW_SIZE+j] * ((cache[cache_idx_base]-wa[wa_idx_base]) * (cache[cache_idx_base]-wa[wa_idx_base]) + (cache[cache_idx_base+1]-wa[wa_idx_base+1]) * (cache[cache_idx_base+1]-wa[wa_idx_base+1]) + (cache[cache_idx_base+2]-wa[wa_idx_base+2]) * (cache[cache_idx_base+2]-wa[wa_idx_base+2]))); } //* if (blockIdx.x==0 && blockIdx.y==0&&blockIdx.z==0&&threadIdx.x==3&&threadIdx.y==3) { printf("[wa:%f, tmp:%f]\n", wa0[wa_idx_base], tmp); }//*/ int write_to_idx = idx_y*IMAGE_WIDTH + idx_x; float *wac = (blockIdx.z == 0) ? 
wac0 : wac1; wac[write_to_idx] = tmp;//*/ } } __host__ __device__ float biLinInt(int x1, int x2, int y1, int y2, float x, float y, float q11, float q21, float q12, float q22) { if (y2 == y1) { if (x2 == x1) return q11; else return 1/(x2-x1) * (q11*(x2-x) + q21*(x-x1)); } else if (x2 == x1) { return 1/(y2-y1) * (q11*(y2-y) + q12*(y-y1)); } else return 1/(x2-x1)/(y2-y1) * (q11*(x2-x)*(y2-y) + q21*(x-x1)*(y2-y) + q12*(x2-x)*(y-y1) + q22*(x-x1)*(y-y1)); } __global__ void wcc(uchar const *im0, uchar const *im1, float *depth) { // load an image block of 38x38 belongs to rerefence image into shared memory __shared__ uchar cache0[(NUM_THREADS+WINDOW_SIZE-1)*(NUM_THREADS+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL]; // TODO: copy loading code to weightedAverage() and wac_gpu() for (int start_y = 0; start_y < blockDim.y+WINDOW_SIZE; start_y += blockDim.y) { for (int start_x = 0; start_x < blockDim.x+WINDOW_SIZE; start_x += (blockDim.x/NUM_COLOUR_CHANNEL)) { int maxThreadIdx_x = NUM_THREADS/NUM_COLOUR_CHANNEL * NUM_COLOUR_CHANNEL; if (threadIdx.x < maxThreadIdx_x && start_x*NUM_COLOUR_CHANNEL+threadIdx.x < (blockDim.x+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL && start_y+threadIdx.y < blockDim.y+WINDOW_SIZE-1) { int idx_y = blockIdx.y * blockDim.y + (start_y + threadIdx.y) - WINDOW_SIZE/2; int idx_x = blockIdx.x * blockDim.x * NUM_COLOUR_CHANNEL + (start_x*NUM_COLOUR_CHANNEL+threadIdx.x) - WINDOW_SIZE/2*NUM_COLOUR_CHANNEL; cache0[(start_y+threadIdx.y)*(blockDim.x+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL + (start_x*NUM_COLOUR_CHANNEL+threadIdx.x)] = (idx_y >= 0 && idx_y < IMAGE_HEIGHT && idx_x >= 0 && idx_x < IMAGE_WIDTH*NUM_COLOUR_CHANNEL) ? im0[idx_y*IMAGE_WIDTH*NUM_COLOUR_CHANNEL+idx_x] : 0; } } } __syncthreads(); /* if (blockIdx.x==0&&blockIdx.y==0&&threadIdx.x==0&&threadIdx.y==0) { for (int i=0; i<38; ++i) { for (int j=0; j<38; ++j) printf("%d ", cache0[i*(blockDim.x+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL+j*NUM_COLOUR_CHANNEL]); printf("\n"); } } //*/ // define global best score and global pos_x, pos_y float best_score = 0.0f; float pos_x = 0.0f; float pos_y = 0.0f; // calculate indices for later use int idx_y = blockIdx.y*blockDim.y+threadIdx.y; int idx_x = blockIdx.x*blockDim.x+threadIdx.x; int wa0_idx_base = idx_y*IMAGE_WIDTH*NUM_COLOUR_CHANNEL + idx_x*NUM_COLOUR_CHANNEL; int wac0_idx = idx_y*IMAGE_WIDTH + idx_x; __shared__ uchar cache1[(NUM_THREADS+WINDOW_SIZE-1)*(NUM_THREADS+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL]; // alternatively load image block of 38x38 belongs to target image into shared memory for (int load_idx = 0; load_idx < ceil(static_cast<float>(IMAGE_WIDTH)/blockDim.x); ++load_idx) { for (int start_y = 0; start_y < blockDim.y+WINDOW_SIZE; start_y += blockDim.y) { for (int start_x = 0; start_x < blockDim.x+WINDOW_SIZE; start_x += (blockDim.x/NUM_COLOUR_CHANNEL)) { int maxThreadIdx_x = NUM_THREADS/NUM_COLOUR_CHANNEL * NUM_COLOUR_CHANNEL; if (threadIdx.x < maxThreadIdx_x && start_x*NUM_COLOUR_CHANNEL+threadIdx.x < (blockDim.x+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL && start_y+threadIdx.y < blockDim.y+WINDOW_SIZE-1) { int idx_y = blockIdx.y * blockDim.y + (start_y + threadIdx.y) - WINDOW_SIZE/2; int idx_x = load_idx * blockDim.x * NUM_COLOUR_CHANNEL + (start_x*NUM_COLOUR_CHANNEL+threadIdx.x) - WINDOW_SIZE/2*NUM_COLOUR_CHANNEL; cache1[(start_y+threadIdx.y)*(blockDim.x+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL + (start_x*NUM_COLOUR_CHANNEL+threadIdx.x)] = (idx_y >= 0 && idx_y < IMAGE_HEIGHT && idx_x >= 0 && idx_x < IMAGE_WIDTH*NUM_COLOUR_CHANNEL) ? 
im1[idx_y*IMAGE_WIDTH*NUM_COLOUR_CHANNEL+idx_x] : 0; //TODO: remember to recover im1 } } } __syncthreads(); /* if (blockIdx.x==0&&blockIdx.y==0&&threadIdx.x==0&&threadIdx.y==0&&load_idx==0) { for (int i=0; i<38; ++i) { for (int j=0; j<38; ++j) { printf("%d ", cache1[i*(blockDim.x+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL+j*NUM_COLOUR_CHANNEL]); } printf("\n"); } } //*/ if (idx_y < IMAGE_HEIGHT && idx_x < IMAGE_WIDTH) { // calculate the local best match along the epipolar line float local_best_score = 0.0f; float local_pos_x = 0.0f; float local_pos_y = 0.0f; int idx_x1; for (int target_offset = 0; target_offset < blockDim.x && (idx_x1=load_idx*blockDim.x + target_offset) < IMAGE_WIDTH; ++target_offset) { float tmp = 0.0f; int wa1_idx_base = idx_y*IMAGE_WIDTH*NUM_COLOUR_CHANNEL + idx_x1*NUM_COLOUR_CHANNEL; int wac1_idx = idx_y*IMAGE_WIDTH + idx_x1; for (int i = 0; i < WINDOW_SIZE; ++i) for (int j = 0; j < WINDOW_SIZE; ++j) { int cache0_idx_base = (threadIdx.y+i)*(blockDim.x+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL + (threadIdx.x+j)*NUM_COLOUR_CHANNEL; int cache1_idx_base = (threadIdx.y+i)*(blockDim.x+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL + (target_offset+j)*NUM_COLOUR_CHANNEL; tmp += (weighted[i*WINDOW_SIZE+j] * ((cache0[cache0_idx_base]-wa0[wa0_idx_base]) * (cache1[cache1_idx_base]-wa1[wa1_idx_base]) + (cache0[cache0_idx_base+1]-wa0[wa0_idx_base+1]) * (cache1[cache1_idx_base+1]-wa1[wa1_idx_base+1]) + (cache0[cache0_idx_base+2]-wa0[wa0_idx_base+2]) * (cache1[cache1_idx_base+2]-wa1[wa1_idx_base+2]))); } tmp /= sqrt(wac0[wac0_idx] * wac1[wac1_idx] * 9); // update local best match if (tmp > local_best_score) { local_best_score = tmp; local_pos_x = idx_x1; local_pos_y = idx_y; } /* if (blockIdx.x==0 && blockIdx.y==0&&threadIdx.x==3&&threadIdx.y==3) { printf("[local_best_score:%f, local_pos_x:%f, idx_x1:%d]\n", local_best_score, local_pos_x, idx_x1); }//*/ } // update global best match if (local_best_score > best_score) { best_score = local_best_score; pos_x = local_pos_x; pos_y = local_pos_y; } /* if (blockIdx.x==0 && blockIdx.y==0&&threadIdx.x==3&&threadIdx.y==3) { printf("[best_score:%f, pos_x:%f, local_pos_x:%f]\n", best_score, pos_x, local_pos_x); }//*/ } } if (idx_y < IMAGE_HEIGHT && idx_x < IMAGE_WIDTH) depth[wac0_idx] = (IMAGE_WIDTH - pos_x + idx_x) / (2*IMAGE_WIDTH); //* if (blockIdx.x==0 && blockIdx.y==0) { if (threadIdx.x == 30 && threadIdx.y == 30) { printf("[idx_y:%d, idx_x:%d, pos_x:%f]\n", idx_y, idx_x, pos_x); } }//*/ } int main(int argc, char** argv) { //* Mat imHost0 = imread(argv[1], CV_LOAD_IMAGE_COLOR); Mat imHost1 = imread(argv[2], CV_LOAD_IMAGE_COLOR); /* for (int i=349; i<375; ++i) { for (int j=445; j<450; ++j) std::cout<<imHost0.at<Vec3b>(i, j)<<" "; printf("\n"); } //for (int i=0; i<36; ++i) { // printf("%d ", imHost0.data[i]); //}//*/ /* for (int i = 0; i < IMAGE_HEIGHT; ++i) { for (int j = 0; j < 255; ++j) { imHost0.at<Vec3b>(i, j) = Vec3b(1*(j+0), 1*(j+0), 1*(j+0)); imHost1.at<Vec3b>(i, j) = Vec3b(1*(j+0), 1*(j+0), 1*(j+0)); } }//*/ uchar* im0; uchar* im1; HANDLE_ERROR(hipMalloc((void**) &im0, IM_SIZE*sizeof(uchar))); HANDLE_ERROR(hipMalloc((void**) &im1, IM_SIZE*sizeof(uchar))); /* float* score; HANDLE_ERROR(hipMalloc((void**) &score, WAC_SIZE*sizeof(float))); float* pos; HANDLE_ERROR(hipMalloc((void**) &pos, POS_SIZE*sizeof(float)));//*/ float* depth; HANDLE_ERROR(hipMalloc((void**) &depth, IMAGE_WIDTH*IMAGE_HEIGHT*sizeof(float))); GpuTimer timer; timer.Start(); //uchar imHostptr0[IM_SIZE]; //uchar imHostptr1[IM_SIZE]; //memcpy(imHostptr0, imHost0.data, IM_SIZE*sizeof(uchar)); 
//memcpy(imHostptr1, imHost1.data, IM_SIZE*sizeof(uchar)); // Note that following 2 lines do not always work, need to use intermediate pointers HANDLE_ERROR(hipMemcpy(im0, imHost0.data, IM_SIZE * sizeof(uchar), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(im1, imHost1.data, IM_SIZE * sizeof(uchar), hipMemcpyHostToDevice)); dim3 dimGrid(ceil(static_cast<float>(IMAGE_WIDTH)/NUM_THREADS), ceil(static_cast<float>(IMAGE_HEIGHT)/NUM_THREADS), 2); dim3 dimBlock(NUM_THREADS, NUM_THREADS); hipLaunchKernelGGL(( weightedAverage), dim3(dimGrid), dim3(dimBlock), 0, 0, im0, im1); hipLaunchKernelGGL(( wac_gpu), dim3(dimGrid), dim3(dimBlock), 0, 0, im0, im1); dimGrid = dim3(ceil(static_cast<float>(IMAGE_WIDTH)/NUM_THREADS), ceil(static_cast<float>(IMAGE_HEIGHT)/NUM_THREADS)); hipLaunchKernelGGL(( wcc), dim3(dimGrid), dim3(dimBlock), 0, 0, im0, im1, depth); timer.Stop(); Mat depthIm(IMAGE_HEIGHT, IMAGE_WIDTH, CV_32F, Scalar(0.0)); //float *tdepth = new float[WAC_SIZE]; HANDLE_ERROR(hipMemcpy(depthIm.data, depth, sizeof(float)*IMAGE_WIDTH*IMAGE_HEIGHT, hipMemcpyDeviceToHost)); printf("\n"); depthIm.convertTo(depthIm, CV_8U, 255.0); printf("Your code ran in: %f msecs.\n", timer.Elapsed()); imshow("depth image", depthIm); waitKey(0); std::vector<int> compression_params; compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION); compression_params.push_back(9); imwrite("output_gpu.png", depthIm, compression_params); HANDLE_ERROR(hipFree(im0)); HANDLE_ERROR(hipFree(im1)); im0 = NULL; im1 = NULL; //HANDLE_ERROR(hipFree(score)); HANDLE_ERROR(hipFree(depth));//*/ return 0; }
7325765420f18dd852643ba957d9d7cc5af0cdfa.cu
// nvcc hello-world.cu -L /usr/local/cuda/lib -lcudart -o hello-world #include <cstdlib> #include <cstdio> #include "Matching.h" #include "const.h" //#include "cam.h" #include "WindowMatching.h" #include "timer.h" #include <cmath> using namespace cv; static void HandleError( cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) __device__ float weighted[WINDOW_SIZE*WINDOW_SIZE]; __device__ float wa0[IM_SIZE]; __device__ float wa1[IM_SIZE]; __device__ float wac0[IMAGE_WIDTH*IMAGE_HEIGHT]; __device__ float wac1[IMAGE_WIDTH*IMAGE_HEIGHT]; //__device__ float ep0[EP_SIZE]; __device__ float* pos; __global__ void weightedAverage(uchar const *im0, uchar const *im1) { // calculate weighted if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x < WINDOW_SIZE && threadIdx.y < WINDOW_SIZE) { int a = WINDOW_SIZE -1; float cos_x = cos(M_PI * (static_cast<int>(threadIdx.x) - WINDOW_SIZE/2) / a); float cos_y = cos(M_PI * (static_cast<int>(threadIdx.y) - WINDOW_SIZE/2) / a); weighted[threadIdx.y*WINDOW_SIZE+threadIdx.x] = cos_x*cos_x*cos_y*cos_y; //printf("threadIdx.y %d, threadIdx.x %d: weight=%f \n", threadIdx.y, threadIdx.x, weighted[threadIdx.y*WINDOW_SIZE+threadIdx.x]); } __syncthreads(); __shared__ uchar cache[(NUM_THREADS+WINDOW_SIZE-1)*(NUM_THREADS+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL]; // load image values from global memory to shared memory uchar const *img = (blockIdx.z == 0) ? im0 : im1; for (int start_y = 0; start_y < blockDim.y+WINDOW_SIZE; start_y += blockDim.y) { for (int start_x = 0; start_x < blockDim.x+WINDOW_SIZE; start_x += (blockDim.x/NUM_COLOUR_CHANNEL)) { int maxThreadIdx_x = NUM_THREADS/NUM_COLOUR_CHANNEL * NUM_COLOUR_CHANNEL; if (threadIdx.x < maxThreadIdx_x && start_x*NUM_COLOUR_CHANNEL+threadIdx.x < (blockDim.x+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL && start_y+threadIdx.y < blockDim.y+WINDOW_SIZE-1) { int idx_y = blockIdx.y * blockDim.y + (start_y + threadIdx.y) - WINDOW_SIZE/2; int idx_x = blockIdx.x * blockDim.x * NUM_COLOUR_CHANNEL + (start_x*NUM_COLOUR_CHANNEL+threadIdx.x) - WINDOW_SIZE/2*NUM_COLOUR_CHANNEL; cache[(start_y+threadIdx.y)*(blockDim.x+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL + (start_x*NUM_COLOUR_CHANNEL+threadIdx.x)] = (idx_y >= 0 && idx_y < IMAGE_HEIGHT && idx_x >= 0 && idx_x < IMAGE_WIDTH*NUM_COLOUR_CHANNEL) ? img[idx_y*IMAGE_WIDTH*NUM_COLOUR_CHANNEL+idx_x] : 0; } } } __syncthreads(); // calculate weighted average //* int idx_y = blockIdx.y*blockDim.y+threadIdx.y; int idx_x = blockIdx.x*blockDim.x+threadIdx.x; if (idx_y < IMAGE_HEIGHT && idx_x < IMAGE_WIDTH) { float tmp_r = 0.0; float tmp_g = 0.0; float tmp_b = 0.0; for (int i = 0; i < WINDOW_SIZE; ++i) for (int j = 0; j < WINDOW_SIZE; ++j) { float weight = weighted[i*WINDOW_SIZE+j]; int cache_idx_base = (threadIdx.y+i)*(blockDim.x+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL + (threadIdx.x+j)*NUM_COLOUR_CHANNEL; tmp_r += (weight * cache[cache_idx_base]); tmp_g += (weight * cache[cache_idx_base+1]); tmp_b += (weight * cache[cache_idx_base+2]); } tmp_r /= 9; tmp_g /= 9; tmp_b /= 9; //* if (blockIdx.x==0 && blockIdx.y==0&&blockIdx.z==0&&threadIdx.x==3&&threadIdx.y==3) { printf("[%f, %f, %f] ", tmp_r, tmp_g, tmp_b); }//*/ int write_to_idx = idx_y*IMAGE_WIDTH*NUM_COLOUR_CHANNEL + idx_x*NUM_COLOUR_CHANNEL; float *wa = (blockIdx.z == 0) ? 
wa0 : wa1;
    wa[write_to_idx] = tmp_r;
    wa[write_to_idx+1] = tmp_g;
    wa[write_to_idx+2] = tmp_b;//*/
    //*
    if (blockIdx.x==0 && blockIdx.y==0 && blockIdx.z==0 && threadIdx.x==3 && threadIdx.y==3) {
      printf("[%f, %f, %f] ", wa[write_to_idx], wa[write_to_idx+1], wa[write_to_idx+2]);
    }//*/
  }
}

//*
__global__ void wac_gpu(uchar const *im0, uchar const *im1) {
  __shared__ uchar cache[(NUM_THREADS+WINDOW_SIZE-1)*(NUM_THREADS+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL];

  // load image values from global memory to shared memory
  uchar const *img = (blockIdx.z == 0) ? im0 : im1;
  for (int start_y = 0; start_y < blockDim.y+WINDOW_SIZE; start_y += blockDim.y) {
    for (int start_x = 0; start_x < blockDim.x+WINDOW_SIZE; start_x += (blockDim.x/NUM_COLOUR_CHANNEL)) {
      int maxThreadIdx_x = NUM_THREADS/NUM_COLOUR_CHANNEL * NUM_COLOUR_CHANNEL;
      if (threadIdx.x < maxThreadIdx_x &&
          start_x*NUM_COLOUR_CHANNEL+threadIdx.x < (blockDim.x+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL &&
          start_y+threadIdx.y < blockDim.y+WINDOW_SIZE-1) {
        int idx_y = blockIdx.y * blockDim.y + (start_y + threadIdx.y) - WINDOW_SIZE/2;
        int idx_x = blockIdx.x * blockDim.x * NUM_COLOUR_CHANNEL + (start_x*NUM_COLOUR_CHANNEL+threadIdx.x) - WINDOW_SIZE/2*NUM_COLOUR_CHANNEL;
        cache[(start_y+threadIdx.y)*(blockDim.x+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL + (start_x*NUM_COLOUR_CHANNEL+threadIdx.x)] =
            (idx_y >= 0 && idx_y < IMAGE_HEIGHT && idx_x >= 0 && idx_x < IMAGE_WIDTH*NUM_COLOUR_CHANNEL)
                ? img[idx_y*IMAGE_WIDTH*NUM_COLOUR_CHANNEL+idx_x] : 0;
      }
    }
  }
  __syncthreads();

  /*
  if (blockIdx.x==0 && blockIdx.y==0 && threadIdx.x==0 && threadIdx.y==0) {
    for (int i=0; i<38; ++i) {
      for (int j=0; j<38; ++j)
        printf("%f ", wa0[i*IMAGE_WIDTH*NUM_COLOUR_CHANNEL+j*NUM_COLOUR_CHANNEL]);
      printf("\n");
    }
  }//*/

  // calculate weighted average correlation
  //*
  int idx_y = blockIdx.y*blockDim.y+threadIdx.y;
  int idx_x = blockIdx.x*blockDim.x+threadIdx.x;
  if (idx_y < IMAGE_HEIGHT && idx_x < IMAGE_WIDTH) {
    float tmp = 0.0;
    int wa_idx_base = idx_y*IMAGE_WIDTH*NUM_COLOUR_CHANNEL + idx_x*NUM_COLOUR_CHANNEL;
    for (int i = 0; i < WINDOW_SIZE; ++i)
      for (int j = 0; j < WINDOW_SIZE; ++j) {
        int cache_idx_base = (threadIdx.y+i)*(blockDim.x+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL + (threadIdx.x+j)*NUM_COLOUR_CHANNEL;
        float *wa = (blockIdx.z == 0) ? wa0 : wa1;
        tmp += (weighted[i*WINDOW_SIZE+j] *
                ((cache[cache_idx_base]-wa[wa_idx_base]) * (cache[cache_idx_base]-wa[wa_idx_base]) +
                 (cache[cache_idx_base+1]-wa[wa_idx_base+1]) * (cache[cache_idx_base+1]-wa[wa_idx_base+1]) +
                 (cache[cache_idx_base+2]-wa[wa_idx_base+2]) * (cache[cache_idx_base+2]-wa[wa_idx_base+2])));
      }
    //*
    if (blockIdx.x==0 && blockIdx.y==0 && blockIdx.z==0 && threadIdx.x==3 && threadIdx.y==3) {
      printf("[wa:%f, tmp:%f]\n", wa0[wa_idx_base], tmp);
    }//*/
    int write_to_idx = idx_y*IMAGE_WIDTH + idx_x;
    float *wac = (blockIdx.z == 0) ? wac0 : wac1;
    wac[write_to_idx] = tmp;//*/
  }
}

__host__ __device__ float biLinInt(int x1, int x2, int y1, int y2, float x, float y,
                                   float q11, float q21, float q12, float q22) {
  if (y2 == y1) {
    if (x2 == x1)
      return q11;
    else
      return 1/(x2-x1) * (q11*(x2-x) + q21*(x-x1));
  } else if (x2 == x1) {
    return 1/(y2-y1) * (q11*(y2-y) + q12*(y-y1));
  } else
    return 1/(x2-x1)/(y2-y1) * (q11*(x2-x)*(y2-y) + q21*(x-x1)*(y2-y) + q12*(x2-x)*(y-y1) + q22*(x-x1)*(y-y1));
}

__global__ void wcc(uchar const *im0, uchar const *im1, float *depth) {
  // load an image block of 38x38 belonging to the reference image into shared memory
  __shared__ uchar cache0[(NUM_THREADS+WINDOW_SIZE-1)*(NUM_THREADS+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL];
  // TODO: copy loading code to weightedAverage() and wac_gpu()
  for (int start_y = 0; start_y < blockDim.y+WINDOW_SIZE; start_y += blockDim.y) {
    for (int start_x = 0; start_x < blockDim.x+WINDOW_SIZE; start_x += (blockDim.x/NUM_COLOUR_CHANNEL)) {
      int maxThreadIdx_x = NUM_THREADS/NUM_COLOUR_CHANNEL * NUM_COLOUR_CHANNEL;
      if (threadIdx.x < maxThreadIdx_x &&
          start_x*NUM_COLOUR_CHANNEL+threadIdx.x < (blockDim.x+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL &&
          start_y+threadIdx.y < blockDim.y+WINDOW_SIZE-1) {
        int idx_y = blockIdx.y * blockDim.y + (start_y + threadIdx.y) - WINDOW_SIZE/2;
        int idx_x = blockIdx.x * blockDim.x * NUM_COLOUR_CHANNEL + (start_x*NUM_COLOUR_CHANNEL+threadIdx.x) - WINDOW_SIZE/2*NUM_COLOUR_CHANNEL;
        cache0[(start_y+threadIdx.y)*(blockDim.x+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL + (start_x*NUM_COLOUR_CHANNEL+threadIdx.x)] =
            (idx_y >= 0 && idx_y < IMAGE_HEIGHT && idx_x >= 0 && idx_x < IMAGE_WIDTH*NUM_COLOUR_CHANNEL)
                ? im0[idx_y*IMAGE_WIDTH*NUM_COLOUR_CHANNEL+idx_x] : 0;
      }
    }
  }
  __syncthreads();

  /*
  if (blockIdx.x==0 && blockIdx.y==0 && threadIdx.x==0 && threadIdx.y==0) {
    for (int i=0; i<38; ++i) {
      for (int j=0; j<38; ++j)
        printf("%d ", cache0[i*(blockDim.x+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL+j*NUM_COLOUR_CHANNEL]);
      printf("\n");
    }
  }
  //*/

  // define global best score and global pos_x, pos_y
  float best_score = 0.0f;
  float pos_x = 0.0f;
  float pos_y = 0.0f;
  // calculate indices for later use
  int idx_y = blockIdx.y*blockDim.y+threadIdx.y;
  int idx_x = blockIdx.x*blockDim.x+threadIdx.x;
  int wa0_idx_base = idx_y*IMAGE_WIDTH*NUM_COLOUR_CHANNEL + idx_x*NUM_COLOUR_CHANNEL;
  int wac0_idx = idx_y*IMAGE_WIDTH + idx_x;

  __shared__ uchar cache1[(NUM_THREADS+WINDOW_SIZE-1)*(NUM_THREADS+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL];
  // load image blocks of 38x38 belonging to the target image into shared memory, one block at a time
  for (int load_idx = 0; load_idx < ceil(static_cast<float>(IMAGE_WIDTH)/blockDim.x); ++load_idx) {
    for (int start_y = 0; start_y < blockDim.y+WINDOW_SIZE; start_y += blockDim.y) {
      for (int start_x = 0; start_x < blockDim.x+WINDOW_SIZE; start_x += (blockDim.x/NUM_COLOUR_CHANNEL)) {
        int maxThreadIdx_x = NUM_THREADS/NUM_COLOUR_CHANNEL * NUM_COLOUR_CHANNEL;
        if (threadIdx.x < maxThreadIdx_x &&
            start_x*NUM_COLOUR_CHANNEL+threadIdx.x < (blockDim.x+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL &&
            start_y+threadIdx.y < blockDim.y+WINDOW_SIZE-1) {
          int idx_y = blockIdx.y * blockDim.y + (start_y + threadIdx.y) - WINDOW_SIZE/2;
          int idx_x = load_idx * blockDim.x * NUM_COLOUR_CHANNEL + (start_x*NUM_COLOUR_CHANNEL+threadIdx.x) - WINDOW_SIZE/2*NUM_COLOUR_CHANNEL;
          cache1[(start_y+threadIdx.y)*(blockDim.x+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL + (start_x*NUM_COLOUR_CHANNEL+threadIdx.x)] =
              (idx_y >= 0 && idx_y < IMAGE_HEIGHT && idx_x >= 0 && idx_x < IMAGE_WIDTH*NUM_COLOUR_CHANNEL)
                  ? im1[idx_y*IMAGE_WIDTH*NUM_COLOUR_CHANNEL+idx_x] : 0; //TODO: remember to recover im1
        }
      }
    }
    __syncthreads();

    /*
    if (blockIdx.x==0 && blockIdx.y==0 && threadIdx.x==0 && threadIdx.y==0 && load_idx==0) {
      for (int i=0; i<38; ++i) {
        for (int j=0; j<38; ++j) {
          printf("%d ", cache1[i*(blockDim.x+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL+j*NUM_COLOUR_CHANNEL]);
        }
        printf("\n");
      }
    }
    //*/

    if (idx_y < IMAGE_HEIGHT && idx_x < IMAGE_WIDTH) {
      // calculate the local best match along the epipolar line
      float local_best_score = 0.0f;
      float local_pos_x = 0.0f;
      float local_pos_y = 0.0f;
      int idx_x1;
      for (int target_offset = 0;
           target_offset < blockDim.x && (idx_x1 = load_idx*blockDim.x + target_offset) < IMAGE_WIDTH;
           ++target_offset) {
        float tmp = 0.0f;
        int wa1_idx_base = idx_y*IMAGE_WIDTH*NUM_COLOUR_CHANNEL + idx_x1*NUM_COLOUR_CHANNEL;
        int wac1_idx = idx_y*IMAGE_WIDTH + idx_x1;
        for (int i = 0; i < WINDOW_SIZE; ++i)
          for (int j = 0; j < WINDOW_SIZE; ++j) {
            int cache0_idx_base = (threadIdx.y+i)*(blockDim.x+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL + (threadIdx.x+j)*NUM_COLOUR_CHANNEL;
            int cache1_idx_base = (threadIdx.y+i)*(blockDim.x+WINDOW_SIZE-1)*NUM_COLOUR_CHANNEL + (target_offset+j)*NUM_COLOUR_CHANNEL;
            tmp += (weighted[i*WINDOW_SIZE+j] *
                    ((cache0[cache0_idx_base]-wa0[wa0_idx_base]) * (cache1[cache1_idx_base]-wa1[wa1_idx_base]) +
                     (cache0[cache0_idx_base+1]-wa0[wa0_idx_base+1]) * (cache1[cache1_idx_base+1]-wa1[wa1_idx_base+1]) +
                     (cache0[cache0_idx_base+2]-wa0[wa0_idx_base+2]) * (cache1[cache1_idx_base+2]-wa1[wa1_idx_base+2])));
          }
        tmp /= sqrt(wac0[wac0_idx] * wac1[wac1_idx] * 9);
        // update local best match
        if (tmp > local_best_score) {
          local_best_score = tmp;
          local_pos_x = idx_x1;
          local_pos_y = idx_y;
        }
        /*
        if (blockIdx.x==0 && blockIdx.y==0 && threadIdx.x==3 && threadIdx.y==3) {
          printf("[local_best_score:%f, local_pos_x:%f, idx_x1:%d]\n", local_best_score, local_pos_x, idx_x1);
        }//*/
      }
      // update global best match
      if (local_best_score > best_score) {
        best_score = local_best_score;
        pos_x = local_pos_x;
        pos_y = local_pos_y;
      }
      /*
      if (blockIdx.x==0 && blockIdx.y==0 && threadIdx.x==3 && threadIdx.y==3) {
        printf("[best_score:%f, pos_x:%f, local_pos_x:%f]\n", best_score, pos_x, local_pos_x);
      }//*/
    }
    // make sure every thread has finished reading cache1 before the next
    // load_idx iteration overwrites it (added to avoid a shared-memory race)
    __syncthreads();
  }

  if (idx_y < IMAGE_HEIGHT && idx_x < IMAGE_WIDTH)
    depth[wac0_idx] = (IMAGE_WIDTH - pos_x + idx_x) / (2*IMAGE_WIDTH);
  //*
  if (blockIdx.x==0 && blockIdx.y==0) {
    if (threadIdx.x == 30 && threadIdx.y == 30) {
      printf("[idx_y:%d, idx_x:%d, pos_x:%f]\n", idx_y, idx_x, pos_x);
    }
  }//*/
}

int main(int argc, char** argv) {
  //*
  Mat imHost0 = imread(argv[1], CV_LOAD_IMAGE_COLOR);
  Mat imHost1 = imread(argv[2], CV_LOAD_IMAGE_COLOR);
  /*
  for (int i=349; i<375; ++i) {
    for (int j=445; j<450; ++j)
      std::cout<<imHost0.at<Vec3b>(i, j)<<" ";
    printf("\n");
  }
  //for (int i=0; i<36; ++i) {
  //  printf("%d ", imHost0.data[i]);
  //}//*/
  /*
  for (int i = 0; i < IMAGE_HEIGHT; ++i) {
    for (int j = 0; j < 255; ++j) {
      imHost0.at<Vec3b>(i, j) = Vec3b(1*(j+0), 1*(j+0), 1*(j+0));
      imHost1.at<Vec3b>(i, j) = Vec3b(1*(j+0), 1*(j+0), 1*(j+0));
    }
  }//*/

  uchar* im0;
  uchar* im1;
  HANDLE_ERROR(cudaMalloc((void**) &im0, IM_SIZE*sizeof(uchar)));
  HANDLE_ERROR(cudaMalloc((void**) &im1, IM_SIZE*sizeof(uchar)));
  /*
  float* score;
  HANDLE_ERROR(cudaMalloc((void**) &score, WAC_SIZE*sizeof(float)));
  float* pos;
  HANDLE_ERROR(cudaMalloc((void**) &pos, POS_SIZE*sizeof(float)));//*/
  float* depth;
  HANDLE_ERROR(cudaMalloc((void**) &depth, IMAGE_WIDTH*IMAGE_HEIGHT*sizeof(float)));

  GpuTimer timer;
  timer.Start();

  //uchar imHostptr0[IM_SIZE];
  //uchar imHostptr1[IM_SIZE];
  //memcpy(imHostptr0, imHost0.data, IM_SIZE*sizeof(uchar));
  //memcpy(imHostptr1, imHost1.data, IM_SIZE*sizeof(uchar));
  // Note that the following 2 lines do not always work; an intermediate host buffer may be needed
  HANDLE_ERROR(cudaMemcpy(im0, imHost0.data, IM_SIZE * sizeof(uchar), cudaMemcpyHostToDevice));
  HANDLE_ERROR(cudaMemcpy(im1, imHost1.data, IM_SIZE * sizeof(uchar), cudaMemcpyHostToDevice));

  dim3 dimGrid(ceil(static_cast<float>(IMAGE_WIDTH)/NUM_THREADS), ceil(static_cast<float>(IMAGE_HEIGHT)/NUM_THREADS), 2);
  dim3 dimBlock(NUM_THREADS, NUM_THREADS);
  weightedAverage<<<dimGrid, dimBlock>>>(im0, im1);
  wac_gpu<<<dimGrid, dimBlock>>>(im0, im1);
  dimGrid = dim3(ceil(static_cast<float>(IMAGE_WIDTH)/NUM_THREADS), ceil(static_cast<float>(IMAGE_HEIGHT)/NUM_THREADS));
  wcc<<<dimGrid, dimBlock>>>(im0, im1, depth);

  timer.Stop();

  Mat depthIm(IMAGE_HEIGHT, IMAGE_WIDTH, CV_32F, Scalar(0.0));
  //float *tdepth = new float[WAC_SIZE];
  HANDLE_ERROR(cudaMemcpy(depthIm.data, depth, sizeof(float)*IMAGE_WIDTH*IMAGE_HEIGHT, cudaMemcpyDeviceToHost));
  printf("\n");
  depthIm.convertTo(depthIm, CV_8U, 255.0);

  printf("Your code ran in: %f msecs.\n", timer.Elapsed());

  imshow("depth image", depthIm);
  waitKey(0);

  std::vector<int> compression_params;
  compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION);
  compression_params.push_back(9);
  imwrite("output_gpu.png", depthIm, compression_params);

  HANDLE_ERROR(cudaFree(im0));
  HANDLE_ERROR(cudaFree(im1));
  im0 = NULL;
  im1 = NULL;
  //HANDLE_ERROR(cudaFree(score));
  HANDLE_ERROR(cudaFree(depth));//*/

  return 0;
}
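// ---------------------------------------------------------------------------
// Added sketch (for illustration; not part of the original file). The note in
// main() about cudaMemcpy straight from cv::Mat::data concerns Mats whose rows
// are padded (non-continuous storage). A defensive helper, assuming OpenCV's
// cv::Mat and the HANDLE_ERROR macro already used in this file, could stage
// the pixels through a guaranteed-contiguous buffer:
//
// static void copyMatToDevice(const cv::Mat &m, uchar *d_dst, size_t nBytes) {
//   // clone() compacts padded rows into one contiguous buffer when necessary
//   cv::Mat src = m.isContinuous() ? m : m.clone();
//   HANDLE_ERROR(cudaMemcpy(d_dst, src.data, nBytes, cudaMemcpyHostToDevice));
// }
//
// Usage would be: copyMatToDevice(imHost0, im0, IM_SIZE * sizeof(uchar));
// ---------------------------------------------------------------------------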
6184847cfe9407acb9f676916497e9ece36a5b39.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "THHDeviceTensor.cuh"
#include "THHDeviceTensorUtils.cuh"
#include "THHDeviceUtils.cuh"
#include "THHReduceApplyUtils.cuh"

__global__ void SpatialReflectionPadding_updateOutput(
  THCDeviceTensor<float, 4> input,
  THCDeviceTensor<float, 4> output,
  int padT, int padB, int padL, int padR) {

  int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
  int plane = blockIdx.y;
  int batch = blockIdx.z;
  if (outputPointId >= output.getSize(2) * output.getSize(3)) {
    return;
  }
  int outputPointX = outputPointId % output.getSize(3);
  int outputPointY = outputPointId / output.getSize(3);

  int iStartX = max(0, -padL);
  int iStartY = max(0, -padT);
  int oStartX = max(0, padL);
  int oStartY = max(0, padT);

  int inputPointX = abs(outputPointX - padL)
                  - abs(outputPointX - (input.getSize(3) + padL - 1))
                  - outputPointX
                  + 2 * padL + input.getSize(3) - 1
                  - oStartX + iStartX;

  int inputPointY = abs(outputPointY - padT)
                  - abs(outputPointY - (input.getSize(2) + padT - 1))
                  - outputPointY
                  + 2 * padT + input.getSize(2) - 1
                  - oStartY + iStartY;

  float valueToCopy = input[batch][plane][inputPointY][inputPointX];
  output[batch][plane][outputPointY][outputPointX] = valueToCopy;
}

void THNN_CudaSpatialReflectionPadding_updateOutput(THCState *state,
                                                    THCudaTensor *input,
                                                    THCudaTensor *output,
                                                    int padL, int padR,
                                                    int padT, int padB) {
  THArgCheck(THC_canUse32BitIndexMath(state, input), 2,
             "input tensor must fit into 32-bit index math");

  int planeDim = 0;
  int dimh = 1;
  int dimw = 2;
  int numBatch = 1;

  int numInputDims = THCudaTensor_nDimension(state, input);
  THArgCheck(numInputDims == 3 || numInputDims == 4, 2,
             "input must be 3 or 4-dimensional");

  if (numInputDims == 4) {
    numBatch = THCudaTensor_size(state, input, 0);
    planeDim++;
    dimh++;
    dimw++;
  }

  int numPlanes = THCudaTensor_size(state, input, planeDim);
  int inputH = THCudaTensor_size(state, input, dimh);
  int inputW = THCudaTensor_size(state, input, dimw);
  int outputH = inputH + padT + padB;
  int outputW = inputW + padL + padR;

  THCDeviceTensor<float, 4> devInput;
  THCDeviceTensor<float, 4> devOutput;

  if (numInputDims == 3) {
    THCudaTensor_resize3d(state, output, numPlanes, outputH, outputW);
    devInput = toDeviceTensor<float, 3>(state, input).upcastOuter<4>();
    devOutput = toDeviceTensor<float, 3>(state, output).upcastOuter<4>();
  } else {
    THCudaTensor_resize4d(state, output, numBatch, numPlanes, outputH, outputW);
    devInput = toDeviceTensor<float, 4>(state, input);
    devOutput = toDeviceTensor<float, 4>(state, output);
  }

  int outputPlaneSize = devOutput.getSize(2) * devOutput.getSize(3);
  dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
                devOutput.getSize(1),
                devOutput.getSize(0));
  dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);

  hipLaunchKernelGGL((SpatialReflectionPadding_updateOutput),
                     dim3(gridSize), dim3(blockSize), 0,
                     THCState_getCurrentStream(state),
                     devInput, devOutput, padT, padB, padL, padR);
}

__global__ void SpatialReflectionPadding_updateGradInput(
  THCDeviceTensor<float, 4> gradInput,
  THCDeviceTensor<float, 4> gradOutput,
  int padT, int padB, int padL, int padR) {

  int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
  int plane = blockIdx.y;
  int batch = blockIdx.z;
  if (outputPointId >= gradOutput.getSize(2) * gradOutput.getSize(3)) {
    return;
  }
  int outputPointX = outputPointId % gradOutput.getSize(3);
  int outputPointY = outputPointId / gradOutput.getSize(3);

  int iStartX = max(0, -padL);
  int iStartY = max(0, -padT);
  int oStartX = max(0, padL);
  int oStartY = max(0, padT);

  int inputPointX = abs(outputPointX - padL)
                  - abs(outputPointX - (gradInput.getSize(3) + padL - 1))
                  - outputPointX
                  + 2 * padL + gradInput.getSize(3) - 1
                  - oStartX + iStartX;

  int inputPointY = abs(outputPointY - padT)
                  - abs(outputPointY - (gradInput.getSize(2) + padT - 1))
                  - outputPointY
                  + 2 * padT + gradInput.getSize(2) - 1
                  - oStartY + iStartY;

  float valueToCopy = gradOutput[batch][plane][outputPointY][outputPointX];
  atomicAdd(&gradInput[batch][plane][inputPointY][inputPointX], valueToCopy);
}

void THNN_CudaSpatialReflectionPadding_updateGradInput(THCState *state,
                                                       THCudaTensor *input,
                                                       THCudaTensor *gradOutput,
                                                       THCudaTensor *gradInput,
                                                       int padL, int padR,
                                                       int padT, int padB) {
  THArgCheck(THC_canUse32BitIndexMath(state, input), 2,
             "input tensor must fit into 32-bit index math");
  THArgCheck(THC_canUse32BitIndexMath(state, gradOutput), 3,
             "output gradient tensor must fit into 32-bit index math");

  int planeDim = 0;
  int dimh = 1;
  int dimw = 2;

  int numInputDims = THCudaTensor_nDimension(state, input);
  if (numInputDims == 4) {
    planeDim++;
    dimh++;
    dimw++;
  }

  THCudaTensor_resizeAs(state, gradInput, input);
  THCudaTensor_zero(state, gradInput);

  THCDeviceTensor<float, 4> devGradInput;
  THCDeviceTensor<float, 4> devGradOutput;

  if (numInputDims == 3) {
    devGradInput = toDeviceTensor<float, 3>(state, gradInput).upcastOuter<4>();
    devGradOutput = toDeviceTensor<float, 3>(state, gradOutput).upcastOuter<4>();
  } else {
    devGradInput = toDeviceTensor<float, 4>(state, gradInput);
    devGradOutput = toDeviceTensor<float, 4>(state, gradOutput);
  }

  int outputPlaneSize = devGradOutput.getSize(2) * devGradOutput.getSize(3);
  dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
                devGradOutput.getSize(1),
                devGradOutput.getSize(0));
  dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);

  hipLaunchKernelGGL((SpatialReflectionPadding_updateGradInput),
                     dim3(gridSize), dim3(blockSize), 0,
                     THCState_getCurrentStream(state),
                     devGradInput, devGradOutput, padT, padB, padL, padR);
}
6184847cfe9407acb9f676916497e9ece36a5b39.cu
#include "THCUNN.h"
#include "THCDeviceTensor.cuh"
#include "THCDeviceTensorUtils.cuh"
#include "THCDeviceUtils.cuh"
#include "THCReduceApplyUtils.cuh"

__global__ void SpatialReflectionPadding_updateOutput(
  THCDeviceTensor<float, 4> input,
  THCDeviceTensor<float, 4> output,
  int padT, int padB, int padL, int padR) {

  int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
  int plane = blockIdx.y;
  int batch = blockIdx.z;
  if (outputPointId >= output.getSize(2) * output.getSize(3)) {
    return;
  }
  int outputPointX = outputPointId % output.getSize(3);
  int outputPointY = outputPointId / output.getSize(3);

  int iStartX = max(0, -padL);
  int iStartY = max(0, -padT);
  int oStartX = max(0, padL);
  int oStartY = max(0, padT);

  int inputPointX = abs(outputPointX - padL)
                  - abs(outputPointX - (input.getSize(3) + padL - 1))
                  - outputPointX
                  + 2 * padL + input.getSize(3) - 1
                  - oStartX + iStartX;

  int inputPointY = abs(outputPointY - padT)
                  - abs(outputPointY - (input.getSize(2) + padT - 1))
                  - outputPointY
                  + 2 * padT + input.getSize(2) - 1
                  - oStartY + iStartY;

  float valueToCopy = input[batch][plane][inputPointY][inputPointX];
  output[batch][plane][outputPointY][outputPointX] = valueToCopy;
}

void THNN_CudaSpatialReflectionPadding_updateOutput(THCState *state,
                                                    THCudaTensor *input,
                                                    THCudaTensor *output,
                                                    int padL, int padR,
                                                    int padT, int padB) {
  THArgCheck(THC_canUse32BitIndexMath(state, input), 2,
             "input tensor must fit into 32-bit index math");

  int planeDim = 0;
  int dimh = 1;
  int dimw = 2;
  int numBatch = 1;

  int numInputDims = THCudaTensor_nDimension(state, input);
  THArgCheck(numInputDims == 3 || numInputDims == 4, 2,
             "input must be 3 or 4-dimensional");

  if (numInputDims == 4) {
    numBatch = THCudaTensor_size(state, input, 0);
    planeDim++;
    dimh++;
    dimw++;
  }

  int numPlanes = THCudaTensor_size(state, input, planeDim);
  int inputH = THCudaTensor_size(state, input, dimh);
  int inputW = THCudaTensor_size(state, input, dimw);
  int outputH = inputH + padT + padB;
  int outputW = inputW + padL + padR;

  THCDeviceTensor<float, 4> devInput;
  THCDeviceTensor<float, 4> devOutput;

  if (numInputDims == 3) {
    THCudaTensor_resize3d(state, output, numPlanes, outputH, outputW);
    devInput = toDeviceTensor<float, 3>(state, input).upcastOuter<4>();
    devOutput = toDeviceTensor<float, 3>(state, output).upcastOuter<4>();
  } else {
    THCudaTensor_resize4d(state, output, numBatch, numPlanes, outputH, outputW);
    devInput = toDeviceTensor<float, 4>(state, input);
    devOutput = toDeviceTensor<float, 4>(state, output);
  }

  int outputPlaneSize = devOutput.getSize(2) * devOutput.getSize(3);
  dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
                devOutput.getSize(1),
                devOutput.getSize(0));
  dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);

  SpatialReflectionPadding_updateOutput<<<gridSize, blockSize, 0,
    THCState_getCurrentStream(state)>>>(devInput, devOutput, padT, padB, padL, padR);
}

__global__ void SpatialReflectionPadding_updateGradInput(
  THCDeviceTensor<float, 4> gradInput,
  THCDeviceTensor<float, 4> gradOutput,
  int padT, int padB, int padL, int padR) {

  int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
  int plane = blockIdx.y;
  int batch = blockIdx.z;
  if (outputPointId >= gradOutput.getSize(2) * gradOutput.getSize(3)) {
    return;
  }
  int outputPointX = outputPointId % gradOutput.getSize(3);
  int outputPointY = outputPointId / gradOutput.getSize(3);

  int iStartX = max(0, -padL);
  int iStartY = max(0, -padT);
  int oStartX = max(0, padL);
  int oStartY = max(0, padT);

  int inputPointX = abs(outputPointX - padL)
                  - abs(outputPointX - (gradInput.getSize(3) + padL - 1))
                  - outputPointX
                  + 2 * padL + gradInput.getSize(3) - 1
                  - oStartX + iStartX;

  int inputPointY = abs(outputPointY - padT)
                  - abs(outputPointY - (gradInput.getSize(2) + padT - 1))
                  - outputPointY
                  + 2 * padT + gradInput.getSize(2) - 1
                  - oStartY + iStartY;

  float valueToCopy = gradOutput[batch][plane][outputPointY][outputPointX];
  atomicAdd(&gradInput[batch][plane][inputPointY][inputPointX], valueToCopy);
}

void THNN_CudaSpatialReflectionPadding_updateGradInput(THCState *state,
                                                       THCudaTensor *input,
                                                       THCudaTensor *gradOutput,
                                                       THCudaTensor *gradInput,
                                                       int padL, int padR,
                                                       int padT, int padB) {
  THArgCheck(THC_canUse32BitIndexMath(state, input), 2,
             "input tensor must fit into 32-bit index math");
  THArgCheck(THC_canUse32BitIndexMath(state, gradOutput), 3,
             "output gradient tensor must fit into 32-bit index math");

  int planeDim = 0;
  int dimh = 1;
  int dimw = 2;

  int numInputDims = THCudaTensor_nDimension(state, input);
  if (numInputDims == 4) {
    planeDim++;
    dimh++;
    dimw++;
  }

  THCudaTensor_resizeAs(state, gradInput, input);
  THCudaTensor_zero(state, gradInput);

  THCDeviceTensor<float, 4> devGradInput;
  THCDeviceTensor<float, 4> devGradOutput;

  if (numInputDims == 3) {
    devGradInput = toDeviceTensor<float, 3>(state, gradInput).upcastOuter<4>();
    devGradOutput = toDeviceTensor<float, 3>(state, gradOutput).upcastOuter<4>();
  } else {
    devGradInput = toDeviceTensor<float, 4>(state, gradInput);
    devGradOutput = toDeviceTensor<float, 4>(state, gradOutput);
  }

  int outputPlaneSize = devGradOutput.getSize(2) * devGradOutput.getSize(3);
  dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
                devGradOutput.getSize(1),
                devGradOutput.getSize(0));
  dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);

  SpatialReflectionPadding_updateGradInput<<<gridSize, blockSize, 0,
    THCState_getCurrentStream(state)>>>(devGradInput, devGradOutput, padT, padB, padL, padR);
}
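// ---------------------------------------------------------------------------
// Added worked example (not in the original source): the reflection-index
// arithmetic above is easier to check on a concrete case. For inputW = 4 and
// padL = padR = 2 (so outputW = 8), oStartX = max(0, padL) = 2 and
// iStartX = max(0, -padL) = 0, giving the map
//   outputPointX : 0 1 2 3 4 5 6 7
//   inputPointX  : 2 1 0 1 2 3 2 1
// e.g. outputPointX = 0: |0-2| - |0-5| - 0 + 2*2 + 4 - 1 - 2 + 0 = 2, i.e. the
// padded border reflects about the edge pixel without repeating it.
// ---------------------------------------------------------------------------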
c875d933fdc41714332906d1048a9ded7e6f83fa.hip
// !!! This is a file automatically generated by hipify!!!
#include <cmath>
#include <cstdio>
#include <cstring>
#include <string>
#include <algorithm>
#include <iostream>
#include <cstdlib>
// #include <unistd.h>
// #include <windows.h>
// #include <unistd.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
// #include <hip/device_functions.h>
#include <hip/hip_runtime_api.h>

using namespace std;
typedef double ld;
typedef long long LL;

const int chunk_size = 1<<16;

namespace io_impl {

inline bool maybe_digit(char c) { return c >= '0' && c <= '9'; }

struct io_s {
private:
  FILE *fin;
  FILE *fout;
  bool negative;
  bool ok;
  char ch;

  inline char next_char() {
    static char buf[100000], *p1 = buf, *p2 = buf;
    return p1 == p2 && (p2 = (p1 = buf) + fread(buf, 1, 100000, fin), p1 == p2) ? EOF : *p1++;
  }

public:
  void init(FILE *_in, FILE *_out) {
    fin = _in;
    fout = _out;
    ch = next_char();
    ok = true;
  }

  template <typename T>
  bool run(T &_v) {
    _v = 0;
    while (!maybe_digit(ch) && ch != EOF) ch = next_char();
    if (ch == EOF) return ok = false;
    do {
      _v = (_v << 1) + (_v << 3) + ch - '0';
    } while (maybe_digit(ch = next_char()));
    return true;
  }

  template <typename T>
  bool rd(T &_v) {
    negative = false;
    _v = 0;
    while (!maybe_digit(ch) && ch != EOF) {
      negative = ch == '-';
      ch = next_char();
    }
    if (ch == EOF) return ok = false;
    do {
      _v = (_v * 10) + (ch - '0');
    } while (maybe_digit(ch = next_char()));
    static double _map[] = {1, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6};
    if (ch == '.') {
      int tp = 0;
      while (maybe_digit(ch = next_char())) {
        _v = (_v * 10) + (ch - '0');
        ++tp;
      }
      _v *= _map[tp];
    }
    if (negative) _v = -_v;
    return true;
  }
};

} // namespace io_impl

using namespace io_impl;

io_s iokb;

namespace output {

const int OutputBufferSize = 1 << 20;

char buffer[OutputBufferSize];
char *s = buffer;

inline void flush() {
  fwrite(buffer, 1, s-buffer, stdout);
  s = buffer;
  fflush(stdout);
}

inline void print(const char ch) {
  // putchar(ch); return;
  if (s-buffer>OutputBufferSize-2) flush();
  *s++ = ch;
}

inline void print(char *str) {
  while (*str!=0) print(char(*str++));
}

inline void print(int x) {
  // printf("%d", x); return;
  char buf[25] = {0}, *p = buf;
  // if (x<0) print('-'), x=-x;
  // if (x == 0) print('0');
  while (x) *(++p) = x%10, x/=10;
  while (p != buf) print(char(*(p--)+'0'));
}

inline void print(LL x) {
  // printf("%d", x); return;
  char buf[25] = {0}, *p = buf;
  if (x == 0) print('0');
  while (x) *(++p) = x%10, x/=10;
  while (p != buf) print(char(*(p--)+'0'));
}

inline void print(ld v) {
  // printf("%.2f", x);
  // static int stk[70], tp;
  // tp = 0;
  if (v < 1e18) {
    if (fabs(v) < 0.005) {
      print('0');
      return;
    } else {
      LL x = (LL)floor(v * 100 + 0.5);
      if (x<0) print('-'), x=-x;
      // cerr << "x=" << x << endl; exit(0);
      print((LL)(x / 100));
      print('.');
      print((char)(x / 10 % 10 + '0'));
      print((char)(x % 10 + '0'));
    }
  } else {
    static char buf[30];
    sprintf(buf, "%.2lf", v);
    print(buf);
  }
}

}

struct ios {
  inline ios & operator >> (int &x) {
    iokb.run(x);
    return *this;
  }

  inline ios & operator >> (ld &x) {
    iokb.rd(x);
    return *this;
  }
} io;

inline void handleCudaError(hipError_t err, string name = "fuck") {
  if (err != hipSuccess) {
    cerr << name << endl;
    cerr << hipGetErrorString(err) << endl;
    exit(0);
  }
}

const int B = 8;
ld *d_a, *d_b, *d_c, *h_a, *h_b, *h_c;
int an, am, bn, bm;
int n, m;

void copyMatrix(ld *&src, ld *&dst, int n, int m) {
  int size = sizeof(ld) * n * m;
  src = (ld*)malloc(size);
  for (int i=0; i<n; ++i)
    for (int j=0; j<m; ++j)
      io >> src[i * m + j];
  handleCudaError(hipMalloc(&dst, size), "hipMalloc in copyMatrix");
  handleCudaError(hipMemcpy(dst, src, size, hipMemcpyHostToDevice), "memcpy in copyMatrix");
}

void copyMatrixAsync(ld *&src, ld *&dst, int n, int m, hipStream_t &stream) {
  int size = sizeof(ld) * n * m;
  handleCudaError(hipMalloc(&dst, size), "hipMalloc in copyMatrix");
  handleCudaError(hipMemcpyAsync(dst, src, size, hipMemcpyHostToDevice, stream), "memcpyasync in copyMatrix");
}

template<typename T>
__global__ void matrixMult(T *d_a, T *d_b, T *d_c, int an, int bm, int am) {
  int index = blockDim.x * blockIdx.x + threadIdx.x;
  int i = index / bm, j = index % bm;
  if (i >= an || j >= bm) return;
  register ld sum = 0;
  int basea = i * am;
  for (int k=0; k<am; ++k)
    sum += d_a[basea + k] * d_b[k * bm + j];
  d_c[i * bm + j] = sum;
  // int index = threadIdx.x;
  // if (index < an * bm)
  //   d_c[index] = 1;
}

void simk(int grids, int block_size, ld *d_a, ld *d_b, ld *d_c, int an, int bm, int am) {
  for (int blockIdxx=0; blockIdxx<grids; ++blockIdxx) {
    for (int threadIdxx=0; threadIdxx<block_size; ++threadIdxx) {
      // printf("%d %d\n", blockIdxx, threadIdxx);
      int blockid = blockIdxx, threadid = threadIdxx;
      int i = threadid / B, j = threadid % B, tbm = (bm + B - 1) / B, tam = (am + B - 1) / B;
      int rowInA = blockid / tam * B + i;
      int colInB = blockid % tbm * B + j;
      // if (i == 1 && j == 0) puts("FUCK");
      printf("blockid=%d, threadid=%d, i=%d, j=%d, rowInA=%d, colInB=%d, an=%d, bm=%d, block_size=%d, B=%d, am=%d\n",
             blockIdxx, threadIdxx, i, j, rowInA, colInB, an, bm, block_size, B, am);
      if (rowInA < an && j < am) printf("fill a[%d][%d]\n", i, j);
      if (i < am && colInB < bm) printf("fill b[%d][%d]\n", i, j);
      if (rowInA < an && colInB < bm) printf("fill c[%d][%d]\n", rowInA, colInB);
    }
  }
  // exit(0);
}

__global__ void matrixMult2(ld *d_a, ld *d_b, ld *d_c, int an, int bm, int am) {
  __shared__ ld a[B][B], b[B][B];
  int blockid = blockIdx.x, threadid = threadIdx.x;
  int i = threadid / B, j = threadid % B;
  int tbm = (bm + B - 1) / B;
  int rowInA = blockid / tbm * B + i;
  int colInB = blockid % tbm * B + j;
  ld sum = 0;
  for (int sub=0; sub<(am + B - 1) / B; ++sub) {
    int x = rowInA, y = sub * B + j;
    if (x < an && y < am)
      a[i][j] = d_a[x * am + y];
    else
      a[i][j] = 0;
    x = sub * B + i;
    y = colInB;
    if (x < am && y < bm)
      b[i][j] = d_b[x * bm + y];
    else
      b[i][j] = 0;
    __syncthreads();
    for (int k=0; k<B; ++k)
      sum += a[i][k] * b[k][j];
    __syncthreads();
  }
  if (rowInA < an && colInB < bm)
    d_c[(rowInA) * bm + colInB] = sum;
}

void outputMatrix(ld *a, int n, int m) {
  for (int i=0; i<n; ++i) {
    int base = i * m;
    output::print(a[base]);
    for (int j=1; j<m; ++j) {
      output::print(',');
      output::print(a[base + j]);
    }
    output::print('\n');
  }
}

void outputinterval(ld *c, int l, int r) {
  if (l == 0) {
    output::print(c[l++]);
  }
  for (register int i=l; i<r; ++i) {
    if (i % m == 0)
      output::print('\n');
    else
      output::print(',');
    output::print(c[i]);
  }
}

void outputMatrixAsync(ld *&a, ld *&d_a, int n, int m) {
  int st = 0, ed = n * m;
  // printf("st=%d ed=%d, a=%p\n", st, ed, a);
  hipStream_t stream[2];
  int mask = 0;
  hipStreamCreate(&stream[0]);
  hipStreamCreate(&stream[1]);
  int size;
  for (; st<ed; st+=size, mask^=1) {
    size = min(chunk_size, ed - st);
    // printf("st=%d st+size=%d, mask=%d\n", st, st+size, mask);
    // handleCudaError(hipMemcpy(a + st, d_a + st, size * sizeof(ld), hipMemcpyDeviceToHost));
    handleCudaError(hipMemcpyAsync(a + st, d_a + st, size * sizeof(ld), hipMemcpyDeviceToHost, stream[mask]));
    // exit(0);
    if (st - chunk_size >= 0) {
      // printf("%d %d\n",st-chunk_size, st);
      handleCudaError(hipStreamSynchronize(stream[mask^1]));
      outputinterval(a, st-chunk_size, st);
    }
  }
  st -= size;
  // sleep(1000);
  handleCudaError(hipStreamSynchronize(stream[0]), "sync stream0 last");
  handleCudaError(hipStreamSynchronize(stream[1]), "sync stream1 last");
  outputinterval(a, st, ed);
  output::print('\n');
}

void build(ld *&h, ld *&d, int n, int m, hipStream_t &s) {
  handleCudaError(hipHostMalloc(&h, sizeof(ld) * n * m, hipHostMallocDefault));
  for (int i=0; i<n; ++i) {
    for (int j=0; j<m; ++j) {
      io >> h[i * m + j];
    }
  }
  copyMatrixAsync(h, d, n, m, s);
}

int main() {
  freopen("output.txt", "w", stdout);
  iokb.init(fopen("input.txt", "r"), fopen("output.txt", "w"));
  hipDeviceProp_t prop;
  hipGetDeviceProperties(&prop, 0);
  cerr << prop.name << endl;
  // hipStream_t mainstream;
  // hipStreamCreate(&mainstream);
  // #endif
  io >> an >> am;
  // build(h_a, d_a, an, am, mainstream);
  copyMatrix(h_a, d_a, an, am);
  io >> bn >> bm;
  // build(h_b, d_b, bn, bm, mainstream);
  copyMatrix(h_b, d_b, bn, bm);
  handleCudaError(hipMalloc(&d_c, sizeof(ld) * an * bm), "allocate for d_c");
  // handleCudaError(hipStreamSynchronize(mainstream));
  int m = (an + B - 1) / B, n = (am + B - 1) / B, k = (bm + B - 1) / B;
  // simk(m * k, B * B, d_a, d_b, d_c, an, bm, am);
  fprintf(stderr, "stderr: m=%d, n=%d, k=%d\n", m, n, k);
  hipLaunchKernelGGL((matrixMult2), dim3(m * k), dim3(B * B), 0, 0, d_a, d_b, d_c, an, bm, am);
  handleCudaError(hipGetLastError(), "kernel error");
  fprintf(stderr, "stderr: running kernel completed\n");
  h_c = (ld*)malloc(sizeof(ld) * an * bm);
  // handleCudaError(hipHostMalloc(&h_c, sizeof(ld) * an * bm, hipHostMallocDefault), "hostalloc for c");
  handleCudaError(hipMemcpy(h_c, d_c, sizeof(ld) * an * bm, hipMemcpyDeviceToHost), "mem back");
  outputMatrix(h_c, an, bm);
  output::flush();
  return 0;
}
c875d933fdc41714332906d1048a9ded7e6f83fa.cu
#include <cmath>
#include <cstdio>
#include <cstring>
#include <string>
#include <algorithm>
#include <iostream>
#include <cstdlib>
// #include <unistd.h>
// #include <windows.h>
// #include <unistd.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
// #include <device_functions.h>
#include <cuda_runtime_api.h>

using namespace std;
typedef double ld;
typedef long long LL;

const int chunk_size = 1<<16;

namespace io_impl {

inline bool maybe_digit(char c) { return c >= '0' && c <= '9'; }

struct io_s {
private:
  FILE *fin;
  FILE *fout;
  bool negative;
  bool ok;
  char ch;

  inline char next_char() {
    static char buf[100000], *p1 = buf, *p2 = buf;
    return p1 == p2 && (p2 = (p1 = buf) + fread(buf, 1, 100000, fin), p1 == p2) ? EOF : *p1++;
  }

public:
  void init(FILE *_in, FILE *_out) {
    fin = _in;
    fout = _out;
    ch = next_char();
    ok = true;
  }

  template <typename T>
  bool run(T &_v) {
    _v = 0;
    while (!maybe_digit(ch) && ch != EOF) ch = next_char();
    if (ch == EOF) return ok = false;
    do {
      _v = (_v << 1) + (_v << 3) + ch - '0';
    } while (maybe_digit(ch = next_char()));
    return true;
  }

  template <typename T>
  bool rd(T &_v) {
    negative = false;
    _v = 0;
    while (!maybe_digit(ch) && ch != EOF) {
      negative = ch == '-';
      ch = next_char();
    }
    if (ch == EOF) return ok = false;
    do {
      _v = (_v * 10) + (ch - '0');
    } while (maybe_digit(ch = next_char()));
    static double _map[] = {1, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6};
    if (ch == '.') {
      int tp = 0;
      while (maybe_digit(ch = next_char())) {
        _v = (_v * 10) + (ch - '0');
        ++tp;
      }
      _v *= _map[tp];
    }
    if (negative) _v = -_v;
    return true;
  }
};

} // namespace io_impl

using namespace io_impl;

io_s iokb;

namespace output {

const int OutputBufferSize = 1 << 20;

char buffer[OutputBufferSize];
char *s = buffer;

inline void flush() {
  fwrite(buffer, 1, s-buffer, stdout);
  s = buffer;
  fflush(stdout);
}

inline void print(const char ch) {
  // putchar(ch); return;
  if (s-buffer>OutputBufferSize-2) flush();
  *s++ = ch;
}

inline void print(char *str) {
  while (*str!=0) print(char(*str++));
}

inline void print(int x) {
  // printf("%d", x); return;
  char buf[25] = {0}, *p = buf;
  // if (x<0) print('-'), x=-x;
  // if (x == 0) print('0');
  while (x) *(++p) = x%10, x/=10;
  while (p != buf) print(char(*(p--)+'0'));
}

inline void print(LL x) {
  // printf("%d", x); return;
  char buf[25] = {0}, *p = buf;
  if (x == 0) print('0');
  while (x) *(++p) = x%10, x/=10;
  while (p != buf) print(char(*(p--)+'0'));
}

inline void print(ld v) {
  // printf("%.2f", x);
  // static int stk[70], tp;
  // tp = 0;
  if (v < 1e18) {
    if (fabs(v) < 0.005) {
      print('0');
      return;
    } else {
      LL x = (LL)floor(v * 100 + 0.5);
      if (x<0) print('-'), x=-x;
      // cerr << "x=" << x << endl; exit(0);
      print((LL)(x / 100));
      print('.');
      print((char)(x / 10 % 10 + '0'));
      print((char)(x % 10 + '0'));
    }
  } else {
    static char buf[30];
    sprintf(buf, "%.2lf", v);
    print(buf);
  }
}

}

struct ios {
  inline ios & operator >> (int &x) {
    iokb.run(x);
    return *this;
  }

  inline ios & operator >> (ld &x) {
    iokb.rd(x);
    return *this;
  }
} io;

inline void handleCudaError(cudaError_t err, string name = "fuck") {
  if (err != cudaSuccess) {
    cerr << name << endl;
    cerr << cudaGetErrorString(err) << endl;
    exit(0);
  }
}

const int B = 8;
ld *d_a, *d_b, *d_c, *h_a, *h_b, *h_c;
int an, am, bn, bm;
int n, m;

void copyMatrix(ld *&src, ld *&dst, int n, int m) {
  int size = sizeof(ld) * n * m;
  src = (ld*)malloc(size);
  for (int i=0; i<n; ++i)
    for (int j=0; j<m; ++j)
      io >> src[i * m + j];
  handleCudaError(cudaMalloc(&dst, size), "cudaMalloc in copyMatrix");
  handleCudaError(cudaMemcpy(dst, src, size, cudaMemcpyHostToDevice), "memcpy in copyMatrix");
}

void copyMatrixAsync(ld *&src, ld *&dst, int n, int m, cudaStream_t &stream) {
  int size = sizeof(ld) * n * m;
  handleCudaError(cudaMalloc(&dst, size), "cudaMalloc in copyMatrix");
  handleCudaError(cudaMemcpyAsync(dst, src, size, cudaMemcpyHostToDevice, stream), "memcpyasync in copyMatrix");
}

template<typename T>
__global__ void matrixMult(T *d_a, T *d_b, T *d_c, int an, int bm, int am) {
  int index = blockDim.x * blockIdx.x + threadIdx.x;
  int i = index / bm, j = index % bm;
  if (i >= an || j >= bm) return;
  register ld sum = 0;
  int basea = i * am;
  for (int k=0; k<am; ++k)
    sum += d_a[basea + k] * d_b[k * bm + j];
  d_c[i * bm + j] = sum;
  // int index = threadIdx.x;
  // if (index < an * bm)
  //   d_c[index] = 1;
}

void simk(int grids, int block_size, ld *d_a, ld *d_b, ld *d_c, int an, int bm, int am) {
  for (int blockIdxx=0; blockIdxx<grids; ++blockIdxx) {
    for (int threadIdxx=0; threadIdxx<block_size; ++threadIdxx) {
      // printf("%d %d\n", blockIdxx, threadIdxx);
      int blockid = blockIdxx, threadid = threadIdxx;
      int i = threadid / B, j = threadid % B, tbm = (bm + B - 1) / B, tam = (am + B - 1) / B;
      int rowInA = blockid / tam * B + i;
      int colInB = blockid % tbm * B + j;
      // if (i == 1 && j == 0) puts("FUCK");
      printf("blockid=%d, threadid=%d, i=%d, j=%d, rowInA=%d, colInB=%d, an=%d, bm=%d, block_size=%d, B=%d, am=%d\n",
             blockIdxx, threadIdxx, i, j, rowInA, colInB, an, bm, block_size, B, am);
      if (rowInA < an && j < am) printf("fill a[%d][%d]\n", i, j);
      if (i < am && colInB < bm) printf("fill b[%d][%d]\n", i, j);
      if (rowInA < an && colInB < bm) printf("fill c[%d][%d]\n", rowInA, colInB);
    }
  }
  // exit(0);
}

__global__ void matrixMult2(ld *d_a, ld *d_b, ld *d_c, int an, int bm, int am) {
  __shared__ ld a[B][B], b[B][B];
  int blockid = blockIdx.x, threadid = threadIdx.x;
  int i = threadid / B, j = threadid % B;
  int tbm = (bm + B - 1) / B;
  int rowInA = blockid / tbm * B + i;
  int colInB = blockid % tbm * B + j;
  ld sum = 0;
  for (int sub=0; sub<(am + B - 1) / B; ++sub) {
    int x = rowInA, y = sub * B + j;
    if (x < an && y < am)
      a[i][j] = d_a[x * am + y];
    else
      a[i][j] = 0;
    x = sub * B + i;
    y = colInB;
    if (x < am && y < bm)
      b[i][j] = d_b[x * bm + y];
    else
      b[i][j] = 0;
    __syncthreads();
    for (int k=0; k<B; ++k)
      sum += a[i][k] * b[k][j];
    __syncthreads();
  }
  if (rowInA < an && colInB < bm)
    d_c[(rowInA) * bm + colInB] = sum;
}

void outputMatrix(ld *a, int n, int m) {
  for (int i=0; i<n; ++i) {
    int base = i * m;
    output::print(a[base]);
    for (int j=1; j<m; ++j) {
      output::print(',');
      output::print(a[base + j]);
    }
    output::print('\n');
  }
}

void outputinterval(ld *c, int l, int r) {
  if (l == 0) {
    output::print(c[l++]);
  }
  for (register int i=l; i<r; ++i) {
    if (i % m == 0)
      output::print('\n');
    else
      output::print(',');
    output::print(c[i]);
  }
}

void outputMatrixAsync(ld *&a, ld *&d_a, int n, int m) {
  int st = 0, ed = n * m;
  // printf("st=%d ed=%d, a=%p\n", st, ed, a);
  cudaStream_t stream[2];
  int mask = 0;
  cudaStreamCreate(&stream[0]);
  cudaStreamCreate(&stream[1]);
  int size;
  for (; st<ed; st+=size, mask^=1) {
    size = min(chunk_size, ed - st);
    // printf("st=%d st+size=%d, mask=%d\n", st, st+size, mask);
    // handleCudaError(cudaMemcpy(a + st, d_a + st, size * sizeof(ld), cudaMemcpyDeviceToHost));
    handleCudaError(cudaMemcpyAsync(a + st, d_a + st, size * sizeof(ld), cudaMemcpyDeviceToHost, stream[mask]));
    // exit(0);
    if (st - chunk_size >= 0) {
      // printf("%d %d\n",st-chunk_size, st);
      handleCudaError(cudaStreamSynchronize(stream[mask^1]));
      outputinterval(a, st-chunk_size, st);
    }
  }
  st -= size;
  // sleep(1000);
  handleCudaError(cudaStreamSynchronize(stream[0]), "sync stream0 last");
  handleCudaError(cudaStreamSynchronize(stream[1]), "sync stream1 last");
  outputinterval(a, st, ed);
  output::print('\n');
}

void build(ld *&h, ld *&d, int n, int m, cudaStream_t &s) {
  handleCudaError(cudaHostAlloc(&h, sizeof(ld) * n * m, cudaHostAllocDefault));
  for (int i=0; i<n; ++i) {
    for (int j=0; j<m; ++j) {
      io >> h[i * m + j];
    }
  }
  copyMatrixAsync(h, d, n, m, s);
}

int main() {
  freopen("output.txt", "w", stdout);
  iokb.init(fopen("input.txt", "r"), fopen("output.txt", "w"));
  cudaDeviceProp prop;
  cudaGetDeviceProperties(&prop, 0);
  cerr << prop.name << endl;
  // cudaStream_t mainstream;
  // cudaStreamCreate(&mainstream);
  // #endif
  io >> an >> am;
  // build(h_a, d_a, an, am, mainstream);
  copyMatrix(h_a, d_a, an, am);
  io >> bn >> bm;
  // build(h_b, d_b, bn, bm, mainstream);
  copyMatrix(h_b, d_b, bn, bm);
  handleCudaError(cudaMalloc(&d_c, sizeof(ld) * an * bm), "allocate for d_c");
  // handleCudaError(cudaStreamSynchronize(mainstream));
  int m = (an + B - 1) / B, n = (am + B - 1) / B, k = (bm + B - 1) / B;
  // simk(m * k, B * B, d_a, d_b, d_c, an, bm, am);
  fprintf(stderr, "stderr: m=%d, n=%d, k=%d\n", m, n, k);
  matrixMult2<<<m * k, B * B>>>(d_a, d_b, d_c, an, bm, am);
  handleCudaError(cudaGetLastError(), "kernel error");
  fprintf(stderr, "stderr: running kernel completed\n");
  h_c = (ld*)malloc(sizeof(ld) * an * bm);
  // handleCudaError(cudaHostAlloc(&h_c, sizeof(ld) * an * bm, cudaHostAllocDefault), "hostalloc for c");
  handleCudaError(cudaMemcpy(h_c, d_c, sizeof(ld) * an * bm, cudaMemcpyDeviceToHost), "mem back");
  outputMatrix(h_c, an, bm);
  output::flush();
  return 0;
}
c86394ba776ff1e1ed7e15112434d2c383161af4.hip
// !!! This is a file automatically generated by hipify!!!
#include<iostream>
#include<stdlib.h>
#include <hip/hip_runtime.h>
#include <math.h>

#define N 1000000
#define RADIUS 3

int checkResults(int startElem, int endElem, float* cudaRes, float* res) {
  int nDiffs=0;
  const float smallVal = 0.000001f;
  for(int i=startElem; i<endElem; i++)
    if(fabs(cudaRes[i]-res[i])>smallVal)
      nDiffs++;
  return nDiffs;
}

void initializeWeights(float* weights, int rad) {
  // for now hardcoded for RADIUS=3
  weights[0] = 0.50f;
  weights[1] = 0.75f;
  weights[2] = 1.25f;
  weights[3] = 2.00f;
  weights[4] = 1.25f;
  weights[5] = 0.75f;
  weights[6] = 0.50f;
}

void initializeArray(float* arr, int nElements) {
  const int myMinNumber = -5;
  const int myMaxNumber = 5;
  srand(time(NULL));
  for( int i=0; i<nElements; i++)
    arr[i] = (float)(rand() % (myMaxNumber - myMinNumber + 1) + myMinNumber);
}

void applyStencil1D_SEQ(int sIdx, int eIdx, const float *weights, float *in, float *out) {
  for (int i = sIdx; i < eIdx; i++) {
    out[i] = 0;
    //loop over all elements in the stencil
    for (int j = -RADIUS; j <= RADIUS; j++) {
      out[i] += weights[j + RADIUS] * in[i + j];
    }
    out[i] = out[i] / (2 * RADIUS + 1);
  }
}

__global__ void applyStencil1D(int sIdx, int eIdx, const float *weights, float *in, float *out) {
  int i = sIdx + blockIdx.x*blockDim.x + threadIdx.x;
  if( i < eIdx ) {
    float result = 0.f;
    result += weights[0]*in[i-3];
    result += weights[1]*in[i-2];
    result += weights[2]*in[i-1];
    result += weights[3]*in[i];
    result += weights[4]*in[i+1];
    result += weights[5]*in[i+2];
    result += weights[6]*in[i+3];
    result /= 7.f;
    out[i] = result;
  }
}

int main() {
  int size = N * sizeof(float);
  int wsize = (2 * RADIUS + 1) * sizeof(float);
  //allocate resources
  float *weights  = (float *)malloc(wsize);
  float *in       = (float *)malloc(size);
  float *out      = (float *)malloc(size);
  float *cuda_out = (float *)malloc(size);
  initializeWeights(weights, RADIUS);
  initializeArray(in, N);

  float *d_weights;  hipMalloc(&d_weights, wsize);
  float *d_in;       hipMalloc(&d_in, size);
  float *d_out;      hipMalloc(&d_out, size);

  hipMemcpy(d_weights, weights, wsize, hipMemcpyHostToDevice);
  hipMemcpy(d_in, in, size, hipMemcpyHostToDevice);

  hipLaunchKernelGGL((applyStencil1D), dim3((N+511)/512), dim3(512), 0, 0, RADIUS, N-RADIUS, d_weights, d_in, d_out);
  applyStencil1D_SEQ(RADIUS, N-RADIUS, weights, in, out);

  hipMemcpy(cuda_out, d_out, size, hipMemcpyDeviceToHost);

  int nDiffs = checkResults(RADIUS, N-RADIUS, cuda_out, out);
  nDiffs==0 ? std::cout<<"Looks good.\n" : std::cout<<"Doesn't look good: " << nDiffs << " differences\n";

  //free resources
  free(weights); free(in); free(out); free(cuda_out);
  hipFree(d_weights); hipFree(d_in); hipFree(d_out);
  return 0;
}
c86394ba776ff1e1ed7e15112434d2c383161af4.cu
#include<iostream>
#include<stdlib.h>
#include <cuda.h>
#include <math.h>

#define N 1000000
#define RADIUS 3

int checkResults(int startElem, int endElem, float* cudaRes, float* res) {
  int nDiffs=0;
  const float smallVal = 0.000001f;
  for(int i=startElem; i<endElem; i++)
    if(fabs(cudaRes[i]-res[i])>smallVal)
      nDiffs++;
  return nDiffs;
}

void initializeWeights(float* weights, int rad) {
  // for now hardcoded for RADIUS=3
  weights[0] = 0.50f;
  weights[1] = 0.75f;
  weights[2] = 1.25f;
  weights[3] = 2.00f;
  weights[4] = 1.25f;
  weights[5] = 0.75f;
  weights[6] = 0.50f;
}

void initializeArray(float* arr, int nElements) {
  const int myMinNumber = -5;
  const int myMaxNumber = 5;
  srand(time(NULL));
  for( int i=0; i<nElements; i++)
    arr[i] = (float)(rand() % (myMaxNumber - myMinNumber + 1) + myMinNumber);
}

void applyStencil1D_SEQ(int sIdx, int eIdx, const float *weights, float *in, float *out) {
  for (int i = sIdx; i < eIdx; i++) {
    out[i] = 0;
    //loop over all elements in the stencil
    for (int j = -RADIUS; j <= RADIUS; j++) {
      out[i] += weights[j + RADIUS] * in[i + j];
    }
    out[i] = out[i] / (2 * RADIUS + 1);
  }
}

__global__ void applyStencil1D(int sIdx, int eIdx, const float *weights, float *in, float *out) {
  int i = sIdx + blockIdx.x*blockDim.x + threadIdx.x;
  if( i < eIdx ) {
    float result = 0.f;
    result += weights[0]*in[i-3];
    result += weights[1]*in[i-2];
    result += weights[2]*in[i-1];
    result += weights[3]*in[i];
    result += weights[4]*in[i+1];
    result += weights[5]*in[i+2];
    result += weights[6]*in[i+3];
    result /= 7.f;
    out[i] = result;
  }
}

int main() {
  int size = N * sizeof(float);
  int wsize = (2 * RADIUS + 1) * sizeof(float);
  //allocate resources
  float *weights  = (float *)malloc(wsize);
  float *in       = (float *)malloc(size);
  float *out      = (float *)malloc(size);
  float *cuda_out = (float *)malloc(size);
  initializeWeights(weights, RADIUS);
  initializeArray(in, N);

  float *d_weights;  cudaMalloc(&d_weights, wsize);
  float *d_in;       cudaMalloc(&d_in, size);
  float *d_out;      cudaMalloc(&d_out, size);

  cudaMemcpy(d_weights, weights, wsize, cudaMemcpyHostToDevice);
  cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice);

  applyStencil1D<<<(N+511)/512, 512>>>(RADIUS, N-RADIUS, d_weights, d_in, d_out);
  applyStencil1D_SEQ(RADIUS, N-RADIUS, weights, in, out);

  cudaMemcpy(cuda_out, d_out, size, cudaMemcpyDeviceToHost);

  int nDiffs = checkResults(RADIUS, N-RADIUS, cuda_out, out);
  nDiffs==0 ? std::cout<<"Looks good.\n" : std::cout<<"Doesn't look good: " << nDiffs << " differences\n";

  //free resources
  free(weights); free(in); free(out); free(cuda_out);
  cudaFree(d_weights); cudaFree(d_in); cudaFree(d_out);
  return 0;
}
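// ----------------------------------------------------------------------------
// Added sketch (not part of the original example): the stencil program above
// calls cudaMalloc/cudaMemcpy and launches its kernel without checking any
// return codes. A minimal checking macro, assuming only the CUDA runtime API,
// could be wrapped around each call, e.g. CUDA_CHECK(cudaMalloc(&d_in, size));
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                                    \
  do {                                                                      \
    cudaError_t err_ = (call);                                              \
    if (err_ != cudaSuccess) {                                              \
      /* report the failing file/line and the runtime's error string */     \
      std::fprintf(stderr, "CUDA error %s at %s:%d\n",                      \
                   cudaGetErrorString(err_), __FILE__, __LINE__);           \
      std::exit(EXIT_FAILURE);                                              \
    }                                                                       \
  } while (0)
// ----------------------------------------------------------------------------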
dfc4fcac77ef33e1a0048be805308948d77b1af5.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * -----------------------------------------------------------------
 * Programmer(s): Slaven Peles @ LLNL
 * -----------------------------------------------------------------
 * Acknowledgements: This example is based on cvAdvDiff_bnd
 *                   example by Scott D. Cohen, Alan C.
 *                   Hindmarsh and Radu Serban @ LLNL
 * -----------------------------------------------------------------
 * SUNDIALS Copyright Start
 * Copyright (c) 2002-2019, Lawrence Livermore National Security
 * and Southern Methodist University.
 * All rights reserved.
 *
 * See the top-level LICENSE and NOTICE files for details.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 * SUNDIALS Copyright End
 * -----------------------------------------------------------------
 * Example problem:
 *
 * The following is a simple example problem with a banded Jacobian,
 * with the program for its solution by CVODE.
 * The problem is the semi-discrete form of the advection-diffusion
 * equation in 2-D:
 *   du/dt = d^2 u / dx^2 + .5 du/dx + d^2 u / dy^2
 * on the rectangle 0 <= x <= 2, 0 <= y <= 1, and the time
 * interval 0 <= t <= 1. Homogeneous Dirichlet boundary conditions
 * are posed, and the initial condition is
 *   u(x,y,t=0) = x(2-x)y(1-y)exp(5xy).
 * The PDE is discretized on a uniform MX+2 by MY+2 grid with
 * central differencing, and with boundary values eliminated,
 * leaving an ODE system of size NEQ = MX*MY.
 * This program solves the problem with the BDF method, Newton
 * iteration with the CVBAND band linear solver, and a user-supplied
 * Jacobian routine.
 * It uses scalar relative and absolute tolerances.
 * Output is printed at t = .1, .2, ..., 1.
 * Run statistics (optional outputs) are printed at the end.
 * -----------------------------------------------------------------
 */

#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#include <hip/hip_runtime.h>

#include <cvode/cvode.h>                /* prototypes for CVODE fcts., consts. */
#include <sunlinsol/sunlinsol_spgmr.h>  /* access to SPGMR SUNLinearSolver     */
#include <sundials/sundials_types.h>    /* definition of type realtype         */
#include <sundials/sundials_math.h>     /* definition of ABS and EXP           */

#include <nvector/nvector_cuda.h>

/* Real Constants */
#define ATOL  RCONST(1.0e-5)  /* scalar absolute tolerance */
#define T0    RCONST(0.0)     /* initial time              */
#define T1    RCONST(0.1)     /* first output time         */
#define DTOUT RCONST(0.1)     /* output time increment     */
#define NOUT  10              /* number of output times    */

#define ZERO RCONST(0.0)
#define HALF RCONST(0.5)
#define ONE  RCONST(1.0)
#define TWO  RCONST(2.0)
#define FIVE RCONST(5.0)

/*
 * CUDA kernels
 */

__global__ void fKernel(const realtype *u, realtype *udot,
                        sunindextype MX, sunindextype MY,
                        realtype hordc, realtype horac, realtype verdc)
{
  realtype uij, udn, uup, ult, urt, hdiff, hadv, vdiff;
  sunindextype i, j, tid;

  /* Loop over all grid points. */
  tid = blockDim.x * blockIdx.x + threadIdx.x;

  if (tid < MX*MY) {
    i = tid/MY;
    j = tid%MY;

    uij = u[tid];
    udn = (j == 0)    ? ZERO : u[tid - 1];
    uup = (j == MY-1) ? ZERO : u[tid + 1];
    ult = (i == 0)    ? ZERO : u[tid - MY];
    urt = (i == MX-1) ? ZERO : u[tid + MY];

    /* Set diffusion and advection terms and load into udot */
    hdiff = hordc*(ult - TWO*uij + urt);
    hadv  = horac*(urt - ult);
    vdiff = verdc*(uup - TWO*uij + udn);
    udot[tid] = hdiff + hadv + vdiff;
  }
}

__global__ void jtvKernel(const realtype *vdata, realtype *Jvdata,
                          sunindextype MX, sunindextype MY,
                          realtype hordc, realtype horac, realtype verdc)
{
  sunindextype i, j, tid;

  /* Loop over all grid points. */
  tid = blockDim.x * blockIdx.x + threadIdx.x;

  if (tid < MX*MY) {
    i = tid/MY;
    j = tid%MY;

    /* set the tid-th element of Jv */
    Jvdata[tid] = -TWO*(verdc+hordc) * vdata[tid];
    if (i != 0)    Jvdata[tid] += (hordc - horac) * vdata[tid-MY];
    if (i != MX-1) Jvdata[tid] += (hordc + horac) * vdata[tid+MY];
    if (j != 0)    Jvdata[tid] += verdc * vdata[tid-1];
    if (j != MY-1) Jvdata[tid] += verdc * vdata[tid+1];
  }
}

/* Type : _UserData (contains model and discretization parameters) */
struct _UserData {
  sunindextype MX, MY, NEQ;
  realtype dx, dy, XMAX, YMAX;
  realtype hdcoef, hacoef, vdcoef;
};

typedef _UserData *UserData;

/* Problem setup and initialization functions */
static UserData SetUserData(int argc, char** argv);
static void SetIC(N_Vector u, UserData data);

/* Functions Called by the Solver */
static int f(realtype t, N_Vector u, N_Vector udot, void *user_data);
static int jtv(N_Vector v, N_Vector Jv, realtype t, N_Vector u, N_Vector fu,
               void *user_data, N_Vector tmp);

/* Private Helper Functions */
static void PrintHeader(realtype reltol, realtype abstol, realtype umax, UserData data);
static void PrintOutput(realtype t, realtype umax, long int nst);
static void PrintFinalStats(void *cvode_mem);

/* Private function to check function return values */
static int check_retval(void *returnvalue, const char *funcname, int opt);

/*
 *-------------------------------
 * Main Program
 *-------------------------------
 */

int main(int argc, char** argv)
{
  realtype reltol, abstol, t, tout, umax;
  N_Vector u;
  UserData data;
  SUNLinearSolver LS;
  void *cvode_mem;
  int iout, retval;
  long int nst;
  hipStream_t stream;
  hipError_t cuerr;

  u = NULL;
  data = NULL;
  LS = NULL;
  cvode_mem = NULL;

  /* optional: create a cudaStream to use with the CUDA NVector
     (otherwise the default stream is used) */
  cuerr = hipStreamCreate(&stream);
  if(cuerr != hipSuccess) {
    printf("Error: hipStreamCreate() failed\n");
    return(1);
  }

  /* Set model parameters */
  data = SetUserData(argc, argv);
  if(check_retval((void *)data, "malloc", 2)) return(1);

  reltol = ZERO;  /* Set the tolerances */
  abstol = ATOL;

  /* Create a CUDA vector with initial values */
  u = N_VNew_Cuda(data->NEQ);  /* Allocate u vector */
  if(check_retval((void*)u, "N_VNew_Cuda", 0)) return(1);

  /* Use a non-default cuda stream for kernel execution */
  N_VSetCudaStream_Cuda(u, &stream);

  SetIC(u, data);  /* Initialize u vector */

  /* Call CVodeCreate to create the solver memory and specify the
   * Backward Differentiation Formula */
  cvode_mem = CVodeCreate(CV_BDF);
  if(check_retval((void *)cvode_mem, "CVodeCreate", 0)) return(1);

  /* Call CVodeInit to initialize the integrator memory and specify the
   * user's right hand side function in u'=f(t,u), the initial time T0, and
   * the initial dependent variable vector u. */
  retval = CVodeInit(cvode_mem, f, T0, u);
  if(check_retval(&retval, "CVodeInit", 1)) return(1);

  /* Call CVodeSStolerances to specify the scalar relative tolerance
   * and scalar absolute tolerance */
  retval = CVodeSStolerances(cvode_mem, reltol, abstol);
  if (check_retval(&retval, "CVodeSStolerances", 1)) return(1);

  /* Set the pointer to user-defined data */
  retval = CVodeSetUserData(cvode_mem, data);
  if(check_retval(&retval, "CVodeSetUserData", 1)) return(1);

  /* Create SPGMR solver structure without preconditioning
   * and the maximum Krylov dimension maxl */
  LS = SUNLinSol_SPGMR(u, PREC_NONE, 0);
  if(check_retval(&retval, "SUNLinSol_SPGMR", 1)) return(1);

  /* Set CVode linear solver to LS */
  retval = CVodeSetLinearSolver(cvode_mem, LS, NULL);
  if(check_retval(&retval, "CVodeSetLinearSolver", 1)) return(1);

  /* Set the Jacobian-times-vector function */
  retval = CVodeSetJacTimes(cvode_mem, NULL, jtv);
  if(check_retval(&retval, "CVodeSetJacTimesVecFn", 1)) return(1);

  /* In loop over output points: call CVode, print results, test for errors */
  umax = N_VMaxNorm(u);
  PrintHeader(reltol, abstol, umax, data);
  for(iout=1, tout=T1; iout <= NOUT; iout++, tout += DTOUT) {
    retval = CVode(cvode_mem, tout, u, &t, CV_NORMAL);
    if(check_retval(&retval, "CVode", 1)) break;
    umax = N_VMaxNorm(u);
    retval = CVodeGetNumSteps(cvode_mem, &nst);
    check_retval(&retval, "CVodeGetNumSteps", 1);
    PrintOutput(t, umax, nst);
  }

  PrintFinalStats(cvode_mem);  /* Print some final statistics */

  N_VDestroy(u);          /* Free the u vector          */
  CVodeFree(&cvode_mem);  /* Free the integrator memory */
  free(data);             /* Free the user data         */

  cuerr = hipStreamDestroy(stream);  /* Free and cleanup the CUDA stream */
  if(cuerr != hipSuccess) {
    printf("Error: hipStreamDestroy() failed\n");
    return(1);
  }

  return(0);
}

/*
 *-------------------------------------------
 * Problem setup and initialization functions
 *-------------------------------------------
 */

/* Set model and discretization parameters */
UserData SetUserData(int argc, char *argv[])
{
  const sunindextype MX = 10;
  const sunindextype MY = 5;
  const realtype XMAX = RCONST(2.0);  /* domain boundaries */
  const realtype YMAX = RCONST(1.0);

  /* Allocate user data structure */
  UserData ud = (UserData) malloc(sizeof *ud);
  if(check_retval((void*) ud, "AllocUserData", 2)) return(NULL);

  ud->MX  = MX;
  ud->MY  = MY;
  ud->NEQ = MX*MY;
  ud->XMAX = XMAX;
  ud->YMAX = YMAX;
  ud->dx = XMAX/(MX+1);  /* Set grid coefficients in data */
  ud->dy = YMAX/(MY+1);
  ud->hdcoef = ONE/(ud->dx*ud->dx);
  ud->hacoef = HALF/(TWO*ud->dx);
  ud->vdcoef = ONE/(ud->dy*ud->dy);

  return ud;
}

/* Set initial conditions in u vector */
static void SetIC(N_Vector u, UserData data)
{
  /* Extract needed constants from data */
  const realtype dx   = data->dx;
  const realtype dy   = data->dy;
  const realtype xmax = data->XMAX;
  const realtype ymax = data->YMAX;
  const sunindextype MY  = data->MY;
  const sunindextype NEQ = data->NEQ;

  /* Extract pointer to solution vector data on the host */
  realtype *udata = N_VGetHostArrayPointer_Cuda(u);

  sunindextype i, j, tid;
  realtype x, y;

  /* Load initial profile into u vector */
  for (tid=0; tid < NEQ; tid++) {
    i = tid / MY;
    j = tid % MY;

    x = (i+1)*dx;
    y = (j+1)*dy;

    udata[tid] = x*(xmax - x)*y*(ymax - y)*SUNRexp(FIVE*x*y);
  }

  N_VCopyToDevice_Cuda(u);
}

/*
 *-------------------------------
 * Functions called by the solver
 *-------------------------------
 */

/* f routine. Compute f(t,u). */
static int f(realtype t, N_Vector u, N_Vector udot, void *user_data)
{
  UserData data = (UserData) user_data;

  /* Extract needed constants from data */
  const sunindextype MX = data->MX;
  const sunindextype MY = data->MY;
  const realtype hordc  = data->hdcoef;
  const realtype horac  = data->hacoef;
  const realtype verdc  = data->vdcoef;

  /* Extract pointers to vector data */
  const realtype *udata = N_VGetDeviceArrayPointer_Cuda(u);
  realtype *dudata      = N_VGetDeviceArrayPointer_Cuda(udot);

  unsigned block = 256;
  unsigned grid = (MX*MY + block - 1) / block;
  hipLaunchKernelGGL((fKernel), dim3(grid), dim3(block), 0, 0,
                     udata, dudata, MX, MY, hordc, horac, verdc);

  return(0);
}

/* Jacobian-times-vector routine. */
static int jtv(N_Vector v, N_Vector Jv, realtype t, N_Vector u, N_Vector fu,
               void *user_data, N_Vector tmp)
{
  UserData data = (UserData) user_data;

  /* Extract needed constants from data */
  const sunindextype MX = data->MX;
  const sunindextype MY = data->MY;
  const realtype hordc  = data->hdcoef;
  const realtype horac  = data->hacoef;
  const realtype verdc  = data->vdcoef;

  /* Extract pointers to vector data */
  const realtype *vdata = N_VGetDeviceArrayPointer_Cuda(v);
  realtype *Jvdata      = N_VGetDeviceArrayPointer_Cuda(Jv);

  unsigned block = 256;
  unsigned grid = (MX*MY + block - 1) / block;

  N_VConst(ZERO, Jv);

  hipLaunchKernelGGL((jtvKernel), dim3(grid), dim3(block), 0, 0,
                     vdata, Jvdata, MX, MY, hordc, horac, verdc);

  return(0);
}

/*
 *-------------------------------
 * Private helper functions
 *-------------------------------
 */

/* Print first lines of output (problem description) */
static void PrintHeader(realtype reltol, realtype abstol, realtype umax, UserData data)
{
  printf("\n2-D Advection-Diffusion Equation\n");
  printf("Mesh dimensions = %d X %d\n", data->MX, data->MY);
  printf("Total system size = %d\n", data->NEQ);
#if defined(SUNDIALS_EXTENDED_PRECISION)
  printf("Tolerance parameters: reltol = %Lg abstol = %Lg\n\n", reltol, abstol);
  printf("At t = %Lg max.norm(u) =%14.6Le \n", T0, umax);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
  printf("Tolerance parameters: reltol = %g abstol = %g\n\n", reltol, abstol);
  printf("At t = %g max.norm(u) =%14.6e \n", T0, umax);
#else
  printf("Tolerance parameters: reltol = %g abstol = %g\n\n", reltol, abstol);
  printf("At t = %g max.norm(u) =%14.6e \n", T0, umax);
#endif

  return;
}

/* Print current value */
static void PrintOutput(realtype t, realtype umax, long int nst)
{
#if defined(SUNDIALS_EXTENDED_PRECISION)
  printf("At t = %4.2Lf max.norm(u) =%14.6Le nst = %4ld\n", t, umax, nst);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
  printf("At t = %4.2f max.norm(u) =%14.6e nst = %4ld\n", t, umax, nst);
#else
  printf("At t = %4.2f max.norm(u) =%14.6e nst = %4ld\n", t, umax, nst);
#endif

  return;
}

/* Get and print some final statistics */
static void PrintFinalStats(void *cvode_mem)
{
  long lenrw, leniw;
  long lenrwLS, leniwLS;
  long int nst, nfe, nsetups, nni, ncfn, netf;
  long int nli, npe, nps, ncfl, nfeLS;
  int retval;

  retval = CVodeGetWorkSpace(cvode_mem, &lenrw, &leniw);
  check_retval(&retval, "CVodeGetWorkSpace", 1);
  retval = CVodeGetNumSteps(cvode_mem, &nst);
  check_retval(&retval, "CVodeGetNumSteps", 1);
  retval = CVodeGetNumRhsEvals(cvode_mem, &nfe);
  check_retval(&retval, "CVodeGetNumRhsEvals", 1);
  retval = CVodeGetNumLinSolvSetups(cvode_mem, &nsetups);
  check_retval(&retval, "CVodeGetNumLinSolvSetups", 1);
  retval = CVodeGetNumErrTestFails(cvode_mem, &netf);
  check_retval(&retval, "CVodeGetNumErrTestFails", 1);
  retval = CVodeGetNumNonlinSolvIters(cvode_mem, &nni);
  check_retval(&retval, "CVodeGetNumNonlinSolvIters", 1);
  retval = CVodeGetNumNonlinSolvConvFails(cvode_mem, &ncfn);
  check_retval(&retval, "CVodeGetNumNonlinSolvConvFails", 1);

  retval = CVodeGetLinWorkSpace(cvode_mem, &lenrwLS, &leniwLS);
  check_retval(&retval, "CVodeGetLinWorkSpace", 1);
  retval = CVodeGetNumLinIters(cvode_mem, &nli);
  check_retval(&retval, "CVodeGetNumLinIters", 1);
  retval = CVodeGetNumPrecEvals(cvode_mem, &npe);
  check_retval(&retval, "CVodeGetNumPrecEvals", 1);
  retval = CVodeGetNumPrecSolves(cvode_mem, &nps);
  check_retval(&retval, "CVodeGetNumPrecSolves", 1);
  retval = CVodeGetNumLinConvFails(cvode_mem, &ncfl);
  check_retval(&retval, "CVodeGetNumLinConvFails", 1);
  retval = CVodeGetNumLinRhsEvals(cvode_mem, &nfeLS);
  check_retval(&retval, "CVodeGetNumLinRhsEvals", 1);

  printf("\nFinal Statistics.. \n\n");
  printf("lenrw = %5ld leniw = %5ld\n", lenrw, leniw);
  printf("lenrwLS = %5ld leniwLS = %5ld\n", lenrwLS, leniwLS);
  printf("nst = %5ld\n", nst);
  printf("nfe = %5ld nfeLS = %5ld\n", nfe, nfeLS);
  printf("nni = %5ld nli = %5ld\n", nni, nli);
  printf("nsetups = %5ld netf = %5ld\n", nsetups, netf);
  printf("npe = %5ld nps = %5ld\n", npe, nps);
  printf("ncfn = %5ld ncfl = %5ld\n\n", ncfn, ncfl);

  return;
}

/* Check function return value...
     opt == 0 means SUNDIALS function allocates memory so check if
              returned NULL pointer
     opt == 1 means SUNDIALS function returns an integer value so check if
              retval >= 0
     opt == 2 means function allocates memory so check if returned
              NULL pointer */
static int check_retval(void *returnvalue, const char *funcname, int opt)
{
  int *retval;

  /* Check if SUNDIALS function returned NULL pointer - no memory allocated */
  if (opt == 0 && returnvalue == NULL) {
    fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n", funcname);
    return(1); }

  /* Check if retval < 0 */
  else if (opt == 1) {
    retval = (int *) returnvalue;
    if (*retval < 0) {
      fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n", funcname, *retval);
      return(1); }}

  /* Check if function returned NULL pointer - no memory allocated */
  else if (opt == 2 && returnvalue == NULL) {
    fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n", funcname);
    return(1); }

  return(0);
}
dfc4fcac77ef33e1a0048be805308948d77b1af5.cu
/* * ----------------------------------------------------------------- * Programmer(s): Slaven Peles @ LLNL * ----------------------------------------------------------------- * Acknowledgements: This example is based on cvAdvDiff_bnd * example by Scott D. Cohen, Alan C. * Hindmarsh and Radu Serban @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2019, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * Example problem: * * The following is a simple example problem with a banded Jacobian, * with the program for its solution by CVODE. * The problem is the semi-discrete form of the advection-diffusion * equation in 2-D: * du/dt = d^2 u / dx^2 + .5 du/dx + d^2 u / dy^2 * on the rectangle 0 <= x <= 2, 0 <= y <= 1, and the time * interval 0 <= t <= 1. Homogeneous Dirichlet boundary conditions * are posed, and the initial condition is * u(x,y,t=0) = x(2-x)y(1-y)exp(5xy). * The PDE is discretized on a uniform MX+2 by MY+2 grid with * central differencing, and with boundary values eliminated, * leaving an ODE system of size NEQ = MX*MY. * This program solves the problem with the BDF method, Newton * iteration with the CVBAND band linear solver, and a user-supplied * Jacobian routine. * It uses scalar relative and absolute tolerances. * Output is printed at t = .1, .2, ..., 1. * Run statistics (optional outputs) are printed at the end. * ----------------------------------------------------------------- */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <cuda_runtime.h> #include <cvode/cvode.h> /* prototypes for CVODE fcts., consts. */ #include <sunlinsol/sunlinsol_spgmr.h> /* access to SPGMR SUNLinearSolver */ #include <sundials/sundials_types.h> /* definition of type realtype */ #include <sundials/sundials_math.h> /* definition of ABS and EXP */ #include <nvector/nvector_cuda.h> /* Real Constants */ #define ATOL RCONST(1.0e-5) /* scalar absolute tolerance */ #define T0 RCONST(0.0) /* initial time */ #define T1 RCONST(0.1) /* first output time */ #define DTOUT RCONST(0.1) /* output time increment */ #define NOUT 10 /* number of output times */ #define ZERO RCONST(0.0) #define HALF RCONST(0.5) #define ONE RCONST(1.0) #define TWO RCONST(2.0) #define FIVE RCONST(5.0) /* * CUDA kernels */ __global__ void fKernel(const realtype *u, realtype *udot, sunindextype MX, sunindextype MY, realtype hordc, realtype horac, realtype verdc) { realtype uij, udn, uup, ult, urt, hdiff, hadv, vdiff; sunindextype i, j, tid; /* Loop over all grid points. */ tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < MX*MY) { i = tid/MY; j = tid%MY; uij = u[tid]; udn = (j == 0) ? ZERO : u[tid - 1]; uup = (j == MY-1) ? ZERO : u[tid + 1]; ult = (i == 0) ? ZERO : u[tid - MY]; urt = (i == MX-1) ? ZERO : u[tid + MY]; /* Set diffusion and advection terms and load into udot */ hdiff = hordc*(ult - TWO*uij + urt); hadv = horac*(urt - ult); vdiff = verdc*(uup - TWO*uij + udn); udot[tid] = hdiff + hadv + vdiff; } } __global__ void jtvKernel(const realtype *vdata, realtype *Jvdata, sunindextype MX, sunindextype MY, realtype hordc, realtype horac, realtype verdc) { sunindextype i, j, tid; /* Loop over all grid points. 
*/ tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < MX*MY) { i = tid/MY; j = tid%MY; /* set the tid-th element of Jv */ Jvdata[tid] = -TWO*(verdc+hordc) * vdata[tid]; if (i != 0) Jvdata[tid] += (hordc - horac) * vdata[tid-MY]; if (i != MX-1) Jvdata[tid] += (hordc + horac) * vdata[tid+MY]; if (j != 0) Jvdata[tid] += verdc * vdata[tid-1]; if (j != MY-1) Jvdata[tid] += verdc * vdata[tid+1]; } } /* Type : _UserData (contains model and discretization parameters) */ struct _UserData { sunindextype MX, MY, NEQ; realtype dx, dy, XMAX, YMAX; realtype hdcoef, hacoef, vdcoef; }; typedef _UserData *UserData; /* Problem setup and initialization functions */ static UserData SetUserData(int argc, char** argv); static void SetIC(N_Vector u, UserData data); /* Functions Called by the Solver */ static int f(realtype t, N_Vector u, N_Vector udot, void *user_data); static int jtv(N_Vector v, N_Vector Jv, realtype t, N_Vector u, N_Vector fu, void *user_data, N_Vector tmp); /* Private Helper Functions */ static void PrintHeader(realtype reltol, realtype abstol, realtype umax, UserData data); static void PrintOutput(realtype t, realtype umax, long int nst); static void PrintFinalStats(void *cvode_mem); /* Private function to check function return values */ static int check_retval(void *returnvalue, const char *funcname, int opt); /* *------------------------------- * Main Program *------------------------------- */ int main(int argc, char** argv) { realtype reltol, abstol, t, tout, umax; N_Vector u; UserData data; SUNLinearSolver LS; void *cvode_mem; int iout, retval; long int nst; cudaStream_t stream; cudaError_t cuerr; u = NULL; data = NULL; LS = NULL; cvode_mem = NULL; /* optional: create a cudaStream to use with the CUDA NVector (otherwise the default stream is used) */ cuerr = cudaStreamCreate(&stream); if(cuerr != cudaSuccess) { printf("Error: cudaStreamCreate() failed\n"); return(1); } /* Set model parameters */ data = SetUserData(argc, argv); if(check_retval((void *)data, "malloc", 2)) return(1); reltol = ZERO; /* Set the tolerances */ abstol = ATOL; /* Create a CUDA vector with initial values */ u = N_VNew_Cuda(data->NEQ); /* Allocate u vector */ if(check_retval((void*)u, "N_VNew_Cuda", 0)) return(1); /* Use a non-default cuda stream for kernel execution */ N_VSetCudaStream_Cuda(u, &stream); SetIC(u, data); /* Initialize u vector */ /* Call CVodeCreate to create the solver memory and specify the * Backward Differentiation Formula */ cvode_mem = CVodeCreate(CV_BDF); if(check_retval((void *)cvode_mem, "CVodeCreate", 0)) return(1); /* Call CVodeInit to initialize the integrator memory and specify the * user's right hand side function in u'=f(t,u), the initial time T0, and * the initial dependent variable vector u. 
*/ retval = CVodeInit(cvode_mem, f, T0, u); if(check_retval(&retval, "CVodeInit", 1)) return(1); /* Call CVodeSStolerances to specify the scalar relative tolerance * and scalar absolute tolerance */ retval = CVodeSStolerances(cvode_mem, reltol, abstol); if (check_retval(&retval, "CVodeSStolerances", 1)) return(1); /* Set the pointer to user-defined data */ retval = CVodeSetUserData(cvode_mem, data); if(check_retval(&retval, "CVodeSetUserData", 1)) return(1); /* Create SPGMR solver structure without preconditioning * and the maximum Krylov dimension maxl */ LS = SUNLinSol_SPGMR(u, PREC_NONE, 0); if(check_retval((void *)LS, "SUNLinSol_SPGMR", 0)) return(1); /* Set CVode linear solver to LS */ retval = CVodeSetLinearSolver(cvode_mem, LS, NULL); if(check_retval(&retval, "CVodeSetLinearSolver", 1)) return(1); /* Set the Jacobian-times-vector function */ retval = CVodeSetJacTimes(cvode_mem, NULL, jtv); if(check_retval(&retval, "CVodeSetJacTimes", 1)) return(1); /* In loop over output points: call CVode, print results, test for errors */ umax = N_VMaxNorm(u); PrintHeader(reltol, abstol, umax, data); for(iout=1, tout=T1; iout <= NOUT; iout++, tout += DTOUT) { retval = CVode(cvode_mem, tout, u, &t, CV_NORMAL); if(check_retval(&retval, "CVode", 1)) break; umax = N_VMaxNorm(u); retval = CVodeGetNumSteps(cvode_mem, &nst); check_retval(&retval, "CVodeGetNumSteps", 1); PrintOutput(t, umax, nst); } PrintFinalStats(cvode_mem); /* Print some final statistics */ N_VDestroy(u); /* Free the u vector */ CVodeFree(&cvode_mem); /* Free the integrator memory */ free(data); /* Free the user data */ cuerr = cudaStreamDestroy(stream); /* Free and cleanup the CUDA stream */ if(cuerr != cudaSuccess) { printf("Error: cudaStreamDestroy() failed\n"); return(1); } return(0); } /* *------------------------------------------- * Problem setup and initialization functions *------------------------------------------- */ /* Set model and discretization parameters */ UserData SetUserData(int argc, char *argv[]) { const sunindextype MX = 10; const sunindextype MY = 5; const realtype XMAX = RCONST(2.0); /* domain boundaries */ const realtype YMAX = RCONST(1.0); /* Allocate user data structure */ UserData ud = (UserData) malloc(sizeof *ud); if(check_retval((void*) ud, "AllocUserData", 2)) return(NULL); ud->MX = MX; ud->MY = MY; ud->NEQ = MX*MY; ud->XMAX = XMAX; ud->YMAX = YMAX; ud->dx = XMAX/(MX+1); /* Set grid coefficients in data */ ud->dy = YMAX/(MY+1); ud->hdcoef = ONE/(ud->dx*ud->dx); ud->hacoef = HALF/(TWO*ud->dx); ud->vdcoef = ONE/(ud->dy*ud->dy); return ud; } /* Set initial conditions in u vector */ static void SetIC(N_Vector u, UserData data) { /* Extract needed constants from data */ const realtype dx = data->dx; const realtype dy = data->dy; const realtype xmax = data->XMAX; const realtype ymax = data->YMAX; const sunindextype MY = data->MY; const sunindextype NEQ = data->NEQ; /* Extract pointer to solution vector data on the host */ realtype *udata = N_VGetHostArrayPointer_Cuda(u); sunindextype i, j, tid; realtype x, y; /* Load initial profile into u vector */ for (tid=0; tid < NEQ; tid++) { i = tid / MY; j = tid % MY; x = (i+1)*dx; y = (j+1)*dy; udata[tid] = x*(xmax - x)*y*(ymax - y)*SUNRexp(FIVE*x*y); } N_VCopyToDevice_Cuda(u); } /* *------------------------------- * Functions called by the solver *------------------------------- */ /* f routine. Compute f(t,u). 
*/ static int f(realtype t, N_Vector u, N_Vector udot, void *user_data) { UserData data = (UserData) user_data; /* Extract needed constants from data */ const sunindextype MX = data->MX; const sunindextype MY = data->MY; const realtype hordc = data->hdcoef; const realtype horac = data->hacoef; const realtype verdc = data->vdcoef; /* Extract pointers to vector data */ const realtype *udata = N_VGetDeviceArrayPointer_Cuda(u); realtype *dudata = N_VGetDeviceArrayPointer_Cuda(udot); unsigned block = 256; unsigned grid = (MX*MY + block - 1) / block; fKernel<<<grid,block>>>(udata, dudata, MX, MY, hordc, horac, verdc); return(0); } /* Jacobian-times-vector routine. */ static int jtv(N_Vector v, N_Vector Jv, realtype t, N_Vector u, N_Vector fu, void *user_data, N_Vector tmp) { UserData data = (UserData) user_data; /* Extract needed constants from data */ const sunindextype MX = data->MX; const sunindextype MY = data->MY; const realtype hordc = data->hdcoef; const realtype horac = data->hacoef; const realtype verdc = data->vdcoef; /* Extract pointers to vector data */ const realtype *vdata = N_VGetDeviceArrayPointer_Cuda(v); realtype *Jvdata = N_VGetDeviceArrayPointer_Cuda(Jv); unsigned block = 256; unsigned grid = (MX*MY + block - 1) / block; N_VConst(ZERO, Jv); jtvKernel<<<grid,block>>>(vdata, Jvdata, MX, MY, hordc, horac, verdc); return(0); } /* *------------------------------- * Private helper functions *------------------------------- */ /* Print first lines of output (problem description) */ static void PrintHeader(realtype reltol, realtype abstol, realtype umax, UserData data) { printf("\n2-D Advection-Diffusion Equation\n"); printf("Mesh dimensions = %d X %d\n", data->MX, data->MY); printf("Total system size = %d\n", data->NEQ); #if defined(SUNDIALS_EXTENDED_PRECISION) printf("Tolerance parameters: reltol = %Lg abstol = %Lg\n\n", reltol, abstol); printf("At t = %Lg max.norm(u) =%14.6Le \n", T0, umax); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("Tolerance parameters: reltol = %g abstol = %g\n\n", reltol, abstol); printf("At t = %g max.norm(u) =%14.6e \n", T0, umax); #else printf("Tolerance parameters: reltol = %g abstol = %g\n\n", reltol, abstol); printf("At t = %g max.norm(u) =%14.6e \n", T0, umax); #endif return; } /* Print current value */ static void PrintOutput(realtype t, realtype umax, long int nst) { #if defined(SUNDIALS_EXTENDED_PRECISION) printf("At t = %4.2Lf max.norm(u) =%14.6Le nst = %4ld\n", t, umax, nst); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("At t = %4.2f max.norm(u) =%14.6e nst = %4ld\n", t, umax, nst); #else printf("At t = %4.2f max.norm(u) =%14.6e nst = %4ld\n", t, umax, nst); #endif return; } /* Get and print some final statistics */ static void PrintFinalStats(void *cvode_mem) { long lenrw, leniw ; long lenrwLS, leniwLS; long int nst, nfe, nsetups, nni, ncfn, netf; long int nli, npe, nps, ncfl, nfeLS; int retval; retval = CVodeGetWorkSpace(cvode_mem, &lenrw, &leniw); check_retval(&retval, "CVodeGetWorkSpace", 1); retval = CVodeGetNumSteps(cvode_mem, &nst); check_retval(&retval, "CVodeGetNumSteps", 1); retval = CVodeGetNumRhsEvals(cvode_mem, &nfe); check_retval(&retval, "CVodeGetNumRhsEvals", 1); retval = CVodeGetNumLinSolvSetups(cvode_mem, &nsetups); check_retval(&retval, "CVodeGetNumLinSolvSetups", 1); retval = CVodeGetNumErrTestFails(cvode_mem, &netf); check_retval(&retval, "CVodeGetNumErrTestFails", 1); retval = CVodeGetNumNonlinSolvIters(cvode_mem, &nni); check_retval(&retval, "CVodeGetNumNonlinSolvIters", 1); retval = 
CVodeGetNumNonlinSolvConvFails(cvode_mem, &ncfn); check_retval(&retval, "CVodeGetNumNonlinSolvConvFails", 1); retval = CVodeGetLinWorkSpace(cvode_mem, &lenrwLS, &leniwLS); check_retval(&retval, "CVodeGetLinWorkSpace", 1); retval = CVodeGetNumLinIters(cvode_mem, &nli); check_retval(&retval, "CVodeGetNumLinIters", 1); retval = CVodeGetNumPrecEvals(cvode_mem, &npe); check_retval(&retval, "CVodeGetNumPrecEvals", 1); retval = CVodeGetNumPrecSolves(cvode_mem, &nps); check_retval(&retval, "CVodeGetNumPrecSolves", 1); retval = CVodeGetNumLinConvFails(cvode_mem, &ncfl); check_retval(&retval, "CVodeGetNumLinConvFails", 1); retval = CVodeGetNumLinRhsEvals(cvode_mem, &nfeLS); check_retval(&retval, "CVodeGetNumLinRhsEvals", 1); printf("\nFinal Statistics.. \n\n"); printf("lenrw = %5ld leniw = %5ld\n" , lenrw, leniw); printf("lenrwLS = %5ld leniwLS = %5ld\n" , lenrwLS, leniwLS); printf("nst = %5ld\n" , nst); printf("nfe = %5ld nfeLS = %5ld\n" , nfe, nfeLS); printf("nni = %5ld nli = %5ld\n" , nni, nli); printf("nsetups = %5ld netf = %5ld\n" , nsetups, netf); printf("npe = %5ld nps = %5ld\n" , npe, nps); printf("ncfn = %5ld ncfl = %5ld\n\n", ncfn, ncfl); return; } /* Check function return value... opt == 0 means SUNDIALS function allocates memory so check if returned NULL pointer opt == 1 means SUNDIALS function returns an integer value so check if retval >= 0 opt == 2 means function allocates memory so check if returned NULL pointer */ static int check_retval(void *returnvalue, const char *funcname, int opt) { int *retval; /* Check if SUNDIALS function returned NULL pointer - no memory allocated */ if (opt == 0 && returnvalue == NULL) { fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n", funcname); return(1); } /* Check if retval < 0 */ else if (opt == 1) { retval = (int *) returnvalue; if (*retval < 0) { fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n", funcname, *retval); return(1); }} /* Check if function returned NULL pointer - no memory allocated */ else if (opt == 2 && returnvalue == NULL) { fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n", funcname); return(1); } return(0); }
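/*
 * Aside (not part of the SUNDIALS sources above; names are illustrative):
 * the kernels in this example flatten the MX x MY interior grid as
 * tid = i*MY + j and impose the homogeneous Dirichlet boundary by
 * substituting zero for any neighbor that falls outside the grid.  A
 * minimal host-side sketch of what fKernel computes at one grid point:
 */
static double f_point(const double *u, long i, long j, long MX, long MY,
                      double hordc, double horac, double verdc)
{
  long tid = i*MY + j;                           /* row-major flattening */
  double uij = u[tid];
  double udn = (j == 0)    ? 0.0 : u[tid - 1];   /* neighbor at y - dy   */
  double uup = (j == MY-1) ? 0.0 : u[tid + 1];   /* neighbor at y + dy   */
  double ult = (i == 0)    ? 0.0 : u[tid - MY];  /* neighbor at x - dx   */
  double urt = (i == MX-1) ? 0.0 : u[tid + MY];  /* neighbor at x + dx   */
  /* central differences for d^2u/dx^2 + .5 du/dx + d^2u/dy^2 */
  return hordc*(ult - 2.0*uij + urt)
       + horac*(urt - ult)
       + verdc*(uup - 2.0*uij + udn);
}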
1254805a932f72e94d0f2aba47b373279ffe1688.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cu_inc/common.cuh" #include "shrink_impl.h" namespace onnxruntime { namespace cuda { // Generic implementation of Shrink template <typename T> __global__ void _ShrinkKernel( const T* input_data, const float bias, const float lambda, T* output_data, const CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); T x = input_data[id]; if (x < -lambda) { output_data[id] = (T)(x + bias); } else if (x > lambda) { output_data[id] = (T)(x - bias); } else { output_data[id] = (T)0; } } // Specialized implementation for 'half' type // the idea is to convert 'half' data to 'float' first, // do the operation and convert result back to 'half' template <> __global__ void _ShrinkKernel( const half* input_data, const float bias, const float lambda, half* output_data, const CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); half x = input_data[id]; if ((float)x < -lambda) { output_data[id] = half((float)x + bias); } else if ((float)x > lambda) { output_data[id] = half((float)x - bias); } else { output_data[id] = (half)0; } } template <typename T> void ShrinkImpl( hipStream_t stream, const T* input_data, const float bias, const float lambda, T* output_data, size_t N) { int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock)); hipLaunchKernelGGL(( _ShrinkKernel<T>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, input_data, bias, lambda, output_data, (CUDA_LONG)N); } #define SPECIALIZED_IMPL(T) \ template void ShrinkImpl<T>(hipStream_t stream, const T* input_data, const float bias, const float lambda, T* output_data, size_t N); SPECIALIZED_IMPL(float) SPECIALIZED_IMPL(double) SPECIALIZED_IMPL(half) SPECIALIZED_IMPL(uint8_t) SPECIALIZED_IMPL(int8_t) SPECIALIZED_IMPL(uint16_t) SPECIALIZED_IMPL(int16_t) SPECIALIZED_IMPL(uint32_t) SPECIALIZED_IMPL(int32_t) SPECIALIZED_IMPL(uint64_t) SPECIALIZED_IMPL(int64_t) } // namespace cuda } // namespace onnxruntime
1254805a932f72e94d0f2aba47b373279ffe1688.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cu_inc/common.cuh" #include "shrink_impl.h" namespace onnxruntime { namespace cuda { // Generic implementation of Shrink template <typename T> __global__ void _ShrinkKernel( const T* input_data, const float bias, const float lambda, T* output_data, const CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); T x = input_data[id]; if (x < -lambda) { output_data[id] = (T)(x + bias); } else if (x > lambda) { output_data[id] = (T)(x - bias); } else { output_data[id] = (T)0; } } // Specialized implementation for 'half' type // the idea is to convert 'half' data to 'float' first, // do the operation and convert result back to 'half' template <> __global__ void _ShrinkKernel( const half* input_data, const float bias, const float lambda, half* output_data, const CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); half x = input_data[id]; if ((float)x < -lambda) { output_data[id] = half((float)x + bias); } else if ((float)x > lambda) { output_data[id] = half((float)x - bias); } else { output_data[id] = (half)0; } } template <typename T> void ShrinkImpl( cudaStream_t stream, const T* input_data, const float bias, const float lambda, T* output_data, size_t N) { int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock)); _ShrinkKernel<T><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( input_data, bias, lambda, output_data, (CUDA_LONG)N); } #define SPECIALIZED_IMPL(T) \ template void ShrinkImpl<T>(cudaStream_t stream, const T* input_data, const float bias, const float lambda, T* output_data, size_t N); SPECIALIZED_IMPL(float) SPECIALIZED_IMPL(double) SPECIALIZED_IMPL(half) SPECIALIZED_IMPL(uint8_t) SPECIALIZED_IMPL(int8_t) SPECIALIZED_IMPL(uint16_t) SPECIALIZED_IMPL(int16_t) SPECIALIZED_IMPL(uint32_t) SPECIALIZED_IMPL(int32_t) SPECIALIZED_IMPL(uint64_t) SPECIALIZED_IMPL(int64_t) } // namespace cuda } // namespace onnxruntime
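/*
 * Aside (not part of the onnxruntime sources above): a self-contained CUDA
 * sketch of the same Shrink operator, with the onnxruntime helpers
 * (CALCULATE_ELEMENTWISE_INDEX_OR_EXIT, GridDim) replaced by a plain bounds
 * check.  All names here (shrink_demo, ...) are illustrative.
 */
#include <cstdio>
#include <cuda_runtime.h>

__global__ void shrink_demo(const float* x, float bias, float lambda,
                            float* y, int n)
{
  int id = blockIdx.x * blockDim.x + threadIdx.x;
  if (id >= n) return;
  float v = x[id];
  if      (v < -lambda) y[id] = v + bias;  /* shift the negative tail up   */
  else if (v >  lambda) y[id] = v - bias;  /* shift the positive tail down */
  else                  y[id] = 0.0f;      /* zero the dead zone           */
}

int main()
{
  const int n = 5;
  float h_x[n] = {-2.0f, -0.5f, 0.0f, 0.5f, 2.0f}, h_y[n];
  float *d_x, *d_y;
  cudaMalloc(&d_x, n * sizeof(float));
  cudaMalloc(&d_y, n * sizeof(float));
  cudaMemcpy(d_x, h_x, n * sizeof(float), cudaMemcpyHostToDevice);
  shrink_demo<<<1, 32>>>(d_x, 0.5f, 1.0f, d_y, n);  /* bias = 0.5, lambda = 1 */
  cudaMemcpy(h_y, d_y, n * sizeof(float), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; i++) printf("%g -> %g\n", h_x[i], h_y[i]);  /* -2 -> -1.5, ..., 2 -> 1.5 */
  cudaFree(d_x); cudaFree(d_y);
  return 0;
}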
4e8a504a934cc9f72b9883f58ea158e45ead3215.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THH/generic/THHTensorRandom.hip" #else #include "ATen/hip/HIPContext.h" #define NUM_BLOCKS min((int)THCCeilDiv(size, (ptrdiff_t) BLOCK_SIZE), MAX_NUM_BLOCKS) #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) void THCTensor_(logNormal)(THCState* state, THCTensor *self_, double mean, double stdv) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); hipLaunchKernelGGL(( generateLogNormal<scalar_t>), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, size, data, mean, stdv); THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(exponential)(THCState* state, THCTensor *self_, double lambda) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); hipLaunchKernelGGL(( generate_exponential), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, size, data, lambda); THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(renormRows)(struct THCState* state, THCTensor* t) { THAssert(THCTensor_(nDimensionLegacyAll)(state, t) == 2); int64_t rows = THCTensor_(size)(state, t, 0); int64_t cols = THCTensor_(size)(state, t, 1); hipDeviceProp_t* props = at::cuda::getCurrentDeviceProperties(); THAssert(props != NULL); int numSM = props->multiProcessorCount; int maxThreads = props->maxThreadsPerBlock; dim3 grid(rows < numSM * 4 ? rows : numSM * 4); dim3 block(cols < maxThreads ? cols : maxThreads); hipLaunchKernelGGL(( renormRowsL1<scalar_t>) , dim3(grid), dim3(block), block.x * sizeof(scalar_t), THCState_getCurrentStream(state), THCTensor_(data)(state, t), rows, cols); } void THCTensor_(multinomial)(struct THCState *state, THCudaLongTensor *self, THCTensor *prob_dist, int n_sample, int with_replacement) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, prob_dist)); THCGenerator* gen = THCRandom_getGenerator(state); int inputSize = THCTensor_(nDimensionLegacyAll)(state, prob_dist); THArgCheck(inputSize > 0 && inputSize <= 2, 2, "prob_dist must be 1 or 2 dim"); // Categories are in the innermost dimension int64_t numDist = inputSize == 1 ? 1 : THCTensor_(sizeLegacyNoScalars)(state, prob_dist, 0); int64_t numCategoriesLong = inputSize == 1 ? 
THCTensor_(sizeLegacyNoScalars)(state, prob_dist, 0) : THCTensor_(sizeLegacyNoScalars)(state, prob_dist, 1); // Since the index tensor is float, numCategories cannot exceed max // float integer precision THArgCheck(numCategoriesLong <= FLOAT32_MAX_CONSECUTIVE_INT, 2, "number of categories cannot exceed 2^24"); int numCategories = (int) numCategoriesLong; THArgCheck(n_sample > 0, 3, "cannot sample <= 0 samples"); if (!with_replacement) { THArgCheck(n_sample <= numCategories, 2, "cannot sample n_sample > prob_dist:size(1) samples without " "replacement"); } int free_prob_dist = 0; // Restructure data for 2d if (inputSize == 1) { THCTensor *temp = THCTensor_(new)(state); THCTensor_(unsqueeze1d)(state, temp, prob_dist, 0); prob_dist = temp; free_prob_dist = 1; } THCudaLongTensor_resize2d(state, self, numDist, n_sample); // get current device properties hipDeviceProp_t* props = at::cuda::getCurrentDeviceProperties(); THAssert(props != NULL); int numSM = props->multiProcessorCount; int maxThreads = props->maxThreadsPerBlock; int maxShared = props->sharedMemPerBlock; int requiredShared = (numCategories < maxThreads ? numCategories : maxThreads) * (sizeof(scalar_t) + sizeof(accreal)); if (n_sample == 1 && maxShared >= requiredShared) { // Optimized allocation-free implementation // To exploit greater parallelism for the sampling, generate the // Uniform random samples in a separate kernel launch, into // temporarily allocated memory. The device RNG is thread-limited THCTensor *sampled = THCTensor_(newWithSize2d)(state, numDist, n_sample); auto out = THTensor_wrap(sampled); at::native::uniform_cuda_(out, 0.0, 1.0); dim3 block(numCategories < maxThreads ? numCategories : maxThreads); dim3 grid(numDist < numSM * 4 ? numDist : numSM * 4); hipLaunchKernelGGL(( sampleMultinomialOnce<scalar_t, accreal>) , dim3(grid), dim3(block), requiredShared, THCState_getCurrentStream(state), THCudaLongTensor_data(state, self), numDist, numCategories, THCTensor_(data)(state, sampled), THCTensor_(data)(state, prob_dist), THCTensor_(stride)(state, prob_dist, 0), THCTensor_(stride)(state, prob_dist, 1) ); THCTensor_(free)(state, sampled); } else { // Generic, slow implementation with memory allocations // For sampling without replacement, we modify the distribution // for subsequent samples in this space THCTensor* origDist = THCTensor_(new)(state); THCTensor_(resizeAs)(state, origDist, prob_dist); THCTensor_(copy)(state, origDist, prob_dist); THCTensor* normDist = THCTensor_(new)(state); THCTensor_(resizeAs)(state, normDist, prob_dist); THCTensor* prefixSum = THCTensor_(new)(state); // Renorm along rows THCTensor_(copy)(state, normDist, origDist); THCTensor_(renormRows)(state, normDist); // Prefix sum along rows THCTensor_(cumsum)(state, prefixSum, normDist, 1); if (with_replacement) { // Sample with replacement // Binary search is warp divergent (so effectively we're running // with just a single thread), but for better utilization, // we need each block to have at least 4 warps. dim3 block(32, 4); // Each warp in a block will generate a sample from one // distribution concurrently. dim3 grid(numDist < MAX_NUM_BLOCKS ? 
numDist : MAX_NUM_BLOCKS); hipLaunchKernelGGL(( sampleMultinomialWithReplacement) , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), gen->state.gen_states, n_sample, THCudaLongTensor_data(state, self), numDist, numCategories, THCTensor_(data)(state, prefixSum), THCTensor_(data)(state, normDist)); } else { // Sample without replacement // Binary search is warp divergent (so effectively we're running // with just a single thread), but for better utilization, // we need each block to have at least 4 warps. dim3 block(32, 4); // Each warp in a block will generate a sample from a different // distribution concurrently. ptrdiff_t numBlocks = THCCeilDiv(numDist, (int64_t) 4); dim3 grid(numBlocks < MAX_NUM_BLOCKS ? numBlocks : MAX_NUM_BLOCKS); for (int sample = 0; sample < n_sample; ++sample) { if (sample > 0) { // Update probabilities // Renorm along rows THCTensor_(copy)(state, normDist, origDist); THCTensor_(renormRows)(state, normDist); // Prefix sum along rows THCTensor_(cumsum)(state, prefixSum, normDist, 1); } // The kernel can only draw one sample before we have to // recalculate our distribution hipLaunchKernelGGL(( sampleMultinomialWithoutReplacement) , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), gen->state.gen_states, n_sample, sample, THCudaLongTensor_data(state, self), numDist, numCategories, THCTensor_(data)(state, origDist), THCTensor_(data)(state, prefixSum)); } } THCTensor_(free)(state, prefixSum); THCTensor_(free)(state, normDist); THCTensor_(free)(state, origDist); } // Revert data restructuring based on input sizes if (inputSize == 1) { THCudaLongTensor_resize1d(state, self, n_sample); } if (free_prob_dist) { THCTensor_(free)(state, prob_dist); } } void THCTensor_(multinomialAliasSetup)(THCState *state, THCTensor *_probs, THCudaLongTensor *_J, THCTensor *_q){ THArgCheck(_probs->dim() == 1, 1, "expected 1-D probability tensor, got %d-D probability tensor instead", _probs->dim()); THAssert(THCTensor_(isContiguous)(state, _q)); THAssert(THCudaLongTensor_isContiguous(state, _J)); THCTensor *probs = THCTensor_(newContiguous)(state, _probs); THAssert(THCTensor_(isContiguous)(state, probs)); int64_t inputsize = THCTensor_(nElement)(state, probs); THCudaLongTensor *smaller = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor *larger = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor *smaller_short = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor *larger_short = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor_resize1d(state, _J, inputsize); THCTensor_(resize1d)(state, _q, inputsize); scalar_t one = ScalarConvert<int64_t, scalar_t>::to(1); int inputBlockDim = THCCeilDiv((int)inputsize + BLOCK_SIZE - 1, BLOCK_SIZE); hipLaunchKernelGGL(( aliasMultinomialFilter) , dim3(inputBlockDim), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state) , THCTensor_(data)(state, _q), THCTensor_(data)(state, probs), THCudaLongTensor_data(state, smaller), THCudaLongTensor_data(state, larger), THCudaLongTensor_data(state, _J), THCudaLongTensor_data(state, smaller_short), THCudaLongTensor_data(state, larger_short), one, inputsize ); THCudaLongTensor_nonzero(state, smaller_short, smaller); THCudaLongTensor_nonzero(state, larger_short, larger); int h_large_c = THCudaLongTensor_nElement(state, larger_short); THCudaLongTensor_resize1d(state, smaller_short, inputsize); THCudaLongTensor_resize1d(state, larger_short, inputsize); hipLaunchKernelGGL(( aliasMultinomialSetup) , dim3(1), dim3(1), 0, 
THCState_getCurrentStream(state), THCudaLongTensor_data(state, _J), THCTensor_(data)(state, _q), inputsize, THCudaLongTensor_data(state, smaller_short), THCudaLongTensor_data(state, larger_short), inputsize - h_large_c, h_large_c ); scalar_t q_max = THCTensor_(maxall)(state, _q); hipLaunchKernelGGL(( condDiv), dim3(inputBlockDim), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), THCTensor_(data)(state, _q), THCudaLongTensor_data(state, _J), inputsize, q_max ); THCudaLongTensor_free(state, smaller); THCudaLongTensor_free(state, larger); THCudaLongTensor_free(state, smaller_short); THCudaLongTensor_free(state, larger_short); THCTensor_free(state, probs); } void THCTensor_(multinomialAliasDraw)(THCState *state, THCudaLongTensor *self, THCTensor *_q, THCudaLongTensor *_J, int n_sample){ THArgCheck(_q->dim() == 1, 1, "expected 1-D probability table, got %d-D probability table instead", _q->dim()); THArgCheck(_J->dim() == 1, 2, "expected 1-D alias table, got %d-D alias table instead", _J->dim()); THArgCheck(n_sample > 0, 3, "cannot sample <= 0 samples"); THAssert(THCTensor_(isContiguous)(state, _q)); THAssert(THCudaLongTensor_isContiguous(state, _J)); THCGenerator* gen = THCRandom_getGenerator(state); int64_t K = THCudaLongTensor_nElement(state, _J); THCudaLongTensor_resize1d(state, self, n_sample); ptrdiff_t size = THCudaLongTensor_nElement(state, self); THCTensor *uniform = THCTensor_(newWithSize1d)(state, n_sample); THCTensor *bernoulli = THCTensor_(newWithSize1d)(state, n_sample); auto out_uniform = THTensor_wrap(uniform); auto out_bernoulli = THTensor_wrap(bernoulli); at::native::uniform_cuda_(out_uniform, 0, K); at::native::uniform_cuda_(out_bernoulli, 0, 1); hipLaunchKernelGGL(( multinomialAliasDrawKernel) , dim3(THCCeilDiv((int)n_sample+BLOCK_SIZE-1, BLOCK_SIZE)), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), size, THCudaLongTensor_data(state, self), THCudaLongTensor_data(state, _J), THCTensor_(data)(state, _q), K, THCTensor_(data)(state, uniform), THCTensor_(data)(state, bernoulli) ); THCTensor_(free)(state, uniform); THCTensor_(free)(state, bernoulli); } #endif #if defined(THC_REAL_IS_DOUBLE) GENERATE_KERNEL1(generate_geometric, double, double p, double, hiprand_uniform_double, ceil(log(x) / log(1-p))) #else GENERATE_KERNEL1(generate_geometric, scalar_t, double p, float, hiprand_uniform, (ScalarConvert<float, scalar_t>::to(ceilf(logf(x) / log(1-p))))) #endif void THCTensor_(geometric)(THCState* state, THCTensor *self_, double p) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); hipLaunchKernelGGL(( generate_geometric), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, size, data, p); THCTensor_(freeCopyTo)(state, self, self_); }; #undef NUM_BLOCKS #endif
4e8a504a934cc9f72b9883f58ea158e45ead3215.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THC/generic/THCTensorRandom.cu" #else #include "ATen/cuda/CUDAContext.h" #define NUM_BLOCKS min((int)THCCeilDiv(size, (ptrdiff_t) BLOCK_SIZE), MAX_NUM_BLOCKS) #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) void THCTensor_(logNormal)(THCState* state, THCTensor *self_, double mean, double stdv) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); generateLogNormal<scalar_t><<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, size, data, mean, stdv); THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(exponential)(THCState* state, THCTensor *self_, double lambda) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); generate_exponential<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, size, data, lambda); THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(renormRows)(struct THCState* state, THCTensor* t) { THAssert(THCTensor_(nDimensionLegacyAll)(state, t) == 2); int64_t rows = THCTensor_(size)(state, t, 0); int64_t cols = THCTensor_(size)(state, t, 1); cudaDeviceProp* props = at::cuda::getCurrentDeviceProperties(); THAssert(props != NULL); int numSM = props->multiProcessorCount; int maxThreads = props->maxThreadsPerBlock; dim3 grid(rows < numSM * 4 ? rows : numSM * 4); dim3 block(cols < maxThreads ? cols : maxThreads); renormRowsL1<scalar_t> <<<grid, block, block.x * sizeof(scalar_t), THCState_getCurrentStream(state)>>>(THCTensor_(data)(state, t), rows, cols); } void THCTensor_(multinomial)(struct THCState *state, THCudaLongTensor *self, THCTensor *prob_dist, int n_sample, int with_replacement) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, prob_dist)); THCGenerator* gen = THCRandom_getGenerator(state); int inputSize = THCTensor_(nDimensionLegacyAll)(state, prob_dist); THArgCheck(inputSize > 0 && inputSize <= 2, 2, "prob_dist must be 1 or 2 dim"); // Categories are in the innermost dimension int64_t numDist = inputSize == 1 ? 1 : THCTensor_(sizeLegacyNoScalars)(state, prob_dist, 0); int64_t numCategoriesLong = inputSize == 1 ? 
THCTensor_(sizeLegacyNoScalars)(state, prob_dist, 0) : THCTensor_(sizeLegacyNoScalars)(state, prob_dist, 1); // Since the index tensor is float, numCategories cannot exceed max // float integer precision THArgCheck(numCategoriesLong <= FLOAT32_MAX_CONSECUTIVE_INT, 2, "number of categories cannot exceed 2^24"); int numCategories = (int) numCategoriesLong; THArgCheck(n_sample > 0, 3, "cannot sample <= 0 samples"); if (!with_replacement) { THArgCheck(n_sample <= numCategories, 2, "cannot sample n_sample > prob_dist:size(1) samples without " "replacement"); } int free_prob_dist = 0; // Restructure data for 2d if (inputSize == 1) { THCTensor *temp = THCTensor_(new)(state); THCTensor_(unsqueeze1d)(state, temp, prob_dist, 0); prob_dist = temp; free_prob_dist = 1; } THCudaLongTensor_resize2d(state, self, numDist, n_sample); // get current device properties cudaDeviceProp* props = at::cuda::getCurrentDeviceProperties(); THAssert(props != NULL); int numSM = props->multiProcessorCount; int maxThreads = props->maxThreadsPerBlock; int maxShared = props->sharedMemPerBlock; int requiredShared = (numCategories < maxThreads ? numCategories : maxThreads) * (sizeof(scalar_t) + sizeof(accreal)); if (n_sample == 1 && maxShared >= requiredShared) { // Optimized allocation-free implementation // To exploit greater parallelism for the sampling, generate the // Uniform random samples in a separate kernel launch, into // temporarily allocated memory. The device RNG is thread-limited THCTensor *sampled = THCTensor_(newWithSize2d)(state, numDist, n_sample); auto out = THTensor_wrap(sampled); at::native::uniform_cuda_(out, 0.0, 1.0); dim3 block(numCategories < maxThreads ? numCategories : maxThreads); dim3 grid(numDist < numSM * 4 ? numDist : numSM * 4); sampleMultinomialOnce<scalar_t, accreal> <<<grid, block, requiredShared, THCState_getCurrentStream(state)>>>( THCudaLongTensor_data(state, self), numDist, numCategories, THCTensor_(data)(state, sampled), THCTensor_(data)(state, prob_dist), THCTensor_(stride)(state, prob_dist, 0), THCTensor_(stride)(state, prob_dist, 1) ); THCTensor_(free)(state, sampled); } else { // Generic, slow implementation with memory allocations // For sampling without replacement, we modify the distribution // for subsequent samples in this space THCTensor* origDist = THCTensor_(new)(state); THCTensor_(resizeAs)(state, origDist, prob_dist); THCTensor_(copy)(state, origDist, prob_dist); THCTensor* normDist = THCTensor_(new)(state); THCTensor_(resizeAs)(state, normDist, prob_dist); THCTensor* prefixSum = THCTensor_(new)(state); // Renorm along rows THCTensor_(copy)(state, normDist, origDist); THCTensor_(renormRows)(state, normDist); // Prefix sum along rows THCTensor_(cumsum)(state, prefixSum, normDist, 1); if (with_replacement) { // Sample with replacement // Binary search is warp divergent (so effectively we're running // with just a single thread), but for better utilization, // we need each block to have at least 4 warps. dim3 block(32, 4); // Each warp in a block will generate a sample from one // distribution concurrently. dim3 grid(numDist < MAX_NUM_BLOCKS ? 
numDist : MAX_NUM_BLOCKS); sampleMultinomialWithReplacement <<<grid, block, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, n_sample, THCudaLongTensor_data(state, self), numDist, numCategories, THCTensor_(data)(state, prefixSum), THCTensor_(data)(state, normDist)); } else { // Sample without replacement // Binary search is warp divergent (so effectively we're running // with just a single thread), but for better utilization, // we need each block to have at least 4 warps. dim3 block(32, 4); // Each warp in a block will generate a sample from a different // distribution concurrently. ptrdiff_t numBlocks = THCCeilDiv(numDist, (int64_t) 4); dim3 grid(numBlocks < MAX_NUM_BLOCKS ? numBlocks : MAX_NUM_BLOCKS); for (int sample = 0; sample < n_sample; ++sample) { if (sample > 0) { // Update probabilities // Renorm along rows THCTensor_(copy)(state, normDist, origDist); THCTensor_(renormRows)(state, normDist); // Prefix sum along rows THCTensor_(cumsum)(state, prefixSum, normDist, 1); } // The kernel can only draw one sample before we have to // recalculate our distribution sampleMultinomialWithoutReplacement <<<grid, block, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, n_sample, sample, THCudaLongTensor_data(state, self), numDist, numCategories, THCTensor_(data)(state, origDist), THCTensor_(data)(state, prefixSum)); } } THCTensor_(free)(state, prefixSum); THCTensor_(free)(state, normDist); THCTensor_(free)(state, origDist); } // Revert data restructuring based on input sizes if (inputSize == 1) { THCudaLongTensor_resize1d(state, self, n_sample); } if (free_prob_dist) { THCTensor_(free)(state, prob_dist); } } void THCTensor_(multinomialAliasSetup)(THCState *state, THCTensor *_probs, THCudaLongTensor *_J, THCTensor *_q){ THArgCheck(_probs->dim() == 1, 1, "expected 1-D probability tensor, got %d-D probability tensor instead", _probs->dim()); THAssert(THCTensor_(isContiguous)(state, _q)); THAssert(THCudaLongTensor_isContiguous(state, _J)); THCTensor *probs = THCTensor_(newContiguous)(state, _probs); THAssert(THCTensor_(isContiguous)(state, probs)); int64_t inputsize = THCTensor_(nElement)(state, probs); THCudaLongTensor *smaller = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor *larger = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor *smaller_short = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor *larger_short = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor_resize1d(state, _J, inputsize); THCTensor_(resize1d)(state, _q, inputsize); scalar_t one = ScalarConvert<int64_t, scalar_t>::to(1); int inputBlockDim = THCCeilDiv((int)inputsize + BLOCK_SIZE - 1, BLOCK_SIZE); aliasMultinomialFilter <<<inputBlockDim, BLOCK_SIZE, 0, THCState_getCurrentStream(state) >>>( THCTensor_(data)(state, _q), THCTensor_(data)(state, probs), THCudaLongTensor_data(state, smaller), THCudaLongTensor_data(state, larger), THCudaLongTensor_data(state, _J), THCudaLongTensor_data(state, smaller_short), THCudaLongTensor_data(state, larger_short), one, inputsize ); THCudaLongTensor_nonzero(state, smaller_short, smaller); THCudaLongTensor_nonzero(state, larger_short, larger); int h_large_c = THCudaLongTensor_nElement(state, larger_short); THCudaLongTensor_resize1d(state, smaller_short, inputsize); THCudaLongTensor_resize1d(state, larger_short, inputsize); aliasMultinomialSetup <<<1, 1, 0, THCState_getCurrentStream(state)>>>( THCudaLongTensor_data(state, _J), THCTensor_(data)(state, _q), inputsize, THCudaLongTensor_data(state, 
smaller_short), THCudaLongTensor_data(state, larger_short), inputsize - h_large_c, h_large_c ); scalar_t q_max = THCTensor_(maxall)(state, _q); condDiv<<< inputBlockDim, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( THCTensor_(data)(state, _q), THCudaLongTensor_data(state, _J), inputsize, q_max ); THCudaLongTensor_free(state, smaller); THCudaLongTensor_free(state, larger); THCudaLongTensor_free(state, smaller_short); THCudaLongTensor_free(state, larger_short); THCTensor_free(state, probs); } void THCTensor_(multinomialAliasDraw)(THCState *state, THCudaLongTensor *self, THCTensor *_q, THCudaLongTensor *_J, int n_sample){ THArgCheck(_q->dim() == 1, 1, "expected 1-D probability table, got %d-D probability table instead", _q->dim()); THArgCheck(_J->dim() == 1, 2, "expected 1-D alias table, got %d-D alias table instead", _J->dim()); THArgCheck(n_sample > 0, 3, "cannot sample <= 0 samples"); THAssert(THCTensor_(isContiguous)(state, _q)); THAssert(THCudaLongTensor_isContiguous(state, _J)); THCGenerator* gen = THCRandom_getGenerator(state); int64_t K = THCudaLongTensor_nElement(state, _J); THCudaLongTensor_resize1d(state, self, n_sample); ptrdiff_t size = THCudaLongTensor_nElement(state, self); THCTensor *uniform = THCTensor_(newWithSize1d)(state, n_sample); THCTensor *bernoulli = THCTensor_(newWithSize1d)(state, n_sample); auto out_uniform = THTensor_wrap(uniform); auto out_bernoulli = THTensor_wrap(bernoulli); at::native::uniform_cuda_(out_uniform, 0, K); at::native::uniform_cuda_(out_bernoulli, 0, 1); multinomialAliasDrawKernel <<<THCCeilDiv((int)n_sample+BLOCK_SIZE-1, BLOCK_SIZE), BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( size, THCudaLongTensor_data(state, self), THCudaLongTensor_data(state, _J), THCTensor_(data)(state, _q), K, THCTensor_(data)(state, uniform), THCTensor_(data)(state, bernoulli) ); THCTensor_(free)(state, uniform); THCTensor_(free)(state, bernoulli); } #endif #if defined(THC_REAL_IS_DOUBLE) GENERATE_KERNEL1(generate_geometric, double, double p, double, curand_uniform_double, ceil(log(x) / log(1-p))) #else GENERATE_KERNEL1(generate_geometric, scalar_t, double p, float, curand_uniform, (ScalarConvert<float, scalar_t>::to(ceilf(logf(x) / log(1-p))))) #endif void THCTensor_(geometric)(THCState* state, THCTensor *self_, double p) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); generate_geometric<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, size, data, p); THCTensor_(freeCopyTo)(state, self, self_); }; #undef NUM_BLOCKS #endif
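/*
 * Aside (not part of the THC sources above): multinomialAliasSetup and
 * multinomialAliasDraw implement the alias method (Vose's variant), which
 * gives O(1) categorical draws after O(K) setup.  The host-side sketch
 * below shows the same algorithm on the CPU; it illustrates the technique,
 * not the THC kernels' exact bookkeeping, and all names are hypothetical.
 */
#include <random>
#include <vector>

struct AliasTable { std::vector<double> q; std::vector<int> J; };

AliasTable alias_setup(const std::vector<double>& p)  /* p sums to 1 */
{
  int K = (int)p.size();
  AliasTable t{std::vector<double>(K), std::vector<int>(K, 0)};
  std::vector<int> small, large;
  for (int i = 0; i < K; i++) {
    t.q[i] = p[i] * K;                   /* scale so an average bucket is 1 */
    (t.q[i] < 1.0 ? small : large).push_back(i);
  }
  while (!small.empty() && !large.empty()) {
    int s = small.back(); small.pop_back();
    int l = large.back(); large.pop_back();
    t.J[s] = l;                          /* overfull bucket l tops up s */
    t.q[l] -= 1.0 - t.q[s];
    (t.q[l] < 1.0 ? small : large).push_back(l);
  }
  /* numerical leftovers: these buckets are full, never take the alias */
  while (!large.empty()) { t.q[large.back()] = 1.0; large.pop_back(); }
  while (!small.empty()) { t.q[small.back()] = 1.0; small.pop_back(); }
  return t;
}

int alias_draw(const AliasTable& t, std::mt19937& g)
{
  std::uniform_real_distribution<double> u(0.0, 1.0);
  int k = (int)(u(g) * t.q.size());      /* pick a bucket uniformly   */
  return u(g) < t.q[k] ? k : t.J[k];     /* keep it or take its alias */
}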
aa5e23c9d19abf811b5f518bbc1119ed8222ab64.hip
// !!! This is a file automatically generated by hipify!!! /* * RegionalFeatures.cu */ #include <assert.h> #include <hip/hip_runtime.h> #include <helper_functions.h> #include <helper_cuda.h> #include <hip/hip_fp16.h> #include <hipcub/hipcub.hpp> #include "RegionalFeatures.cuh" struct MaxOp { template <typename T> __device__ __forceinline__ T operator()(const T &a, const T &b) const { return (b > a) ? b : a; } }; struct MinOp { template <typename T> __device__ __forceinline__ T operator()(const T &a, const T &b) const { return (b < a) ? b : a; } }; struct SumOp { template <typename T> __device__ __forceinline__ T operator()(const T &a, const T &b) const { return a+b; } }; void MaxIntensity ( int *d_labels, unsigned short *d_img, int *d_labels_tmp, unsigned short *d_img_tmp, int *d_labels_reg, unsigned short *d_maxint_reg, void *d_cub_tmp, size_t cub_tmp_bytes, int *d_num_regions, int w, int h, int d ) { int image_size = w*h*d; size_t temp_storage_bytes = 0; MaxOp max_op; // Max Intensity for Original Image // Radix Sort hipcub::DeviceRadixSort::SortPairs(NULL, temp_storage_bytes, d_labels, d_labels_tmp, d_img, d_img_tmp, image_size); assert(temp_storage_bytes < cub_tmp_bytes); hipcub::DeviceRadixSort::SortPairs(d_cub_tmp, temp_storage_bytes, d_labels, d_labels_tmp, d_img, d_img_tmp, image_size); // Reduce temp_storage_bytes = 0; hipcub::DeviceReduce::ReduceByKey(NULL, temp_storage_bytes, d_labels_tmp, d_labels_reg, d_img_tmp, d_maxint_reg, d_num_regions, max_op, image_size); assert(temp_storage_bytes < cub_tmp_bytes); hipcub::DeviceReduce::ReduceByKey(d_cub_tmp, temp_storage_bytes, d_labels_tmp, d_labels_reg, d_img_tmp, d_maxint_reg, d_num_regions, max_op, image_size); } void MaxNormalized ( int *d_labels, float *d_norm, int *d_labels_tmp, float *d_norm_tmp, int *d_labels_reg, float *d_maxnorm_reg, void *d_cub_tmp, size_t cub_tmp_bytes, int *d_num_regions, int w, int h, int d ) { int image_size = w*h*d; size_t temp_storage_bytes = 0; MaxOp max_op; // Max Intensity for Normalized Image temp_storage_bytes = 0; hipcub::DeviceRadixSort::SortPairs(NULL, temp_storage_bytes, d_labels, d_labels_tmp, d_norm, d_norm_tmp, image_size); assert(temp_storage_bytes < cub_tmp_bytes); hipcub::DeviceRadixSort::SortPairs(d_cub_tmp, temp_storage_bytes, d_labels, d_labels_tmp, d_norm, d_norm_tmp, image_size); temp_storage_bytes = 0; hipcub::DeviceReduce::ReduceByKey(NULL, temp_storage_bytes, d_labels_tmp, d_labels_reg, d_norm_tmp, d_maxnorm_reg, d_num_regions, max_op, image_size); assert(temp_storage_bytes < cub_tmp_bytes); hipcub::DeviceReduce::ReduceByKey(d_cub_tmp, temp_storage_bytes, d_labels_tmp, d_labels_reg, d_norm_tmp, d_maxnorm_reg, d_num_regions, max_op, image_size); } void MinNormalized ( int *d_labels, float *d_norm, int *d_labels_tmp, float *d_norm_tmp, int *d_labels_reg, float *d_minnorm_reg, void *d_cub_tmp, size_t cub_tmp_bytes, int *d_num_regions, int w, int h, int d ) { int image_size = w*h*d; size_t temp_storage_bytes = 0; MinOp min_op; // Min Intensity for Normalized Image temp_storage_bytes = 0; hipcub::DeviceRadixSort::SortPairs(NULL, temp_storage_bytes, d_labels, d_labels_tmp, d_norm, d_norm_tmp, image_size); assert(temp_storage_bytes < cub_tmp_bytes); hipcub::DeviceRadixSort::SortPairs(d_cub_tmp, temp_storage_bytes, d_labels, d_labels_tmp, d_norm, d_norm_tmp, image_size); temp_storage_bytes = 0; hipcub::DeviceReduce::ReduceByKey(NULL, temp_storage_bytes, d_labels_tmp, d_labels_reg, d_norm_tmp, d_minnorm_reg, d_num_regions, min_op, image_size); assert(temp_storage_bytes < cub_tmp_bytes); 
hipcub::DeviceReduce::ReduceByKey(d_cub_tmp, temp_storage_bytes, d_labels_tmp, d_labels_reg, d_norm_tmp, d_minnorm_reg, d_num_regions, min_op, image_size); } void SumNormalized ( int *d_labels, float *d_norm, int *d_labels_tmp, float *d_norm_tmp, int *d_labels_reg, float *d_sumnorm_reg, void *d_cub_tmp, size_t cub_tmp_bytes, int *d_num_regions, int w, int h, int d ) { int image_size = w*h*d; size_t temp_storage_bytes = 0; SumOp sum_op; // Sum Intensity for Normalized Image temp_storage_bytes = 0; hipcub::DeviceRadixSort::SortPairs(NULL, temp_storage_bytes, d_labels, d_labels_tmp, d_norm, d_norm_tmp, image_size); assert(temp_storage_bytes < cub_tmp_bytes); hipcub::DeviceRadixSort::SortPairs(d_cub_tmp, temp_storage_bytes, d_labels, d_labels_tmp, d_norm, d_norm_tmp, image_size); temp_storage_bytes = 0; hipcub::DeviceReduce::ReduceByKey(NULL, temp_storage_bytes, d_labels_tmp, d_labels_reg, d_norm_tmp, d_sumnorm_reg, d_num_regions, sum_op, image_size); assert(temp_storage_bytes < cub_tmp_bytes); hipcub::DeviceReduce::ReduceByKey(d_cub_tmp, temp_storage_bytes, d_labels_tmp, d_labels_reg, d_norm_tmp, d_sumnorm_reg, d_num_regions, sum_op, image_size); } void RegionalSize ( int *d_labels_tmp, int *d_labels_reg, unsigned short *d_size_reg, void *d_cub_tmp, size_t cub_tmp_bytes, int *d_num_regions, int image_size ) { size_t temp_storage_bytes = 0; // RunLengthEncode hipcub::DeviceRunLengthEncode::Encode(NULL, temp_storage_bytes, d_labels_tmp, d_labels_reg, d_size_reg, d_num_regions, image_size); assert(temp_storage_bytes < cub_tmp_bytes); hipcub::DeviceRunLengthEncode::Encode(d_cub_tmp, temp_storage_bytes, d_labels_tmp, d_labels_reg, d_size_reg, d_num_regions, image_size); } #define IG_BLOCKDIM_X 8 #define IG_BLOCKDIM_Y 8 #define IG_BLOCKDIM_Z 8 __global__ void initGridKernel ( float *d_grid, int axis, int w, int h, int d ) { const int baseX = blockIdx.x * IG_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * IG_BLOCKDIM_Y + threadIdx.y; const int baseZ = blockIdx.z * IG_BLOCKDIM_Z + threadIdx.z; const int idx = (baseZ * h + baseY) * w + baseX; if(axis == 0) { d_grid[idx] = (float)baseX; } else if(axis == 1) { d_grid[idx] = (float)baseY; } else { d_grid[idx] = (float)baseZ; } } extern "C" void initGrid ( float *d_grid, int axis, int w, int h, int d ) { assert(w % (IG_BLOCKDIM_X) == 0); assert(h % (IG_BLOCKDIM_Y) == 0); assert(d % (IG_BLOCKDIM_Z) == 0); dim3 blocks(w / (IG_BLOCKDIM_X), h/(IG_BLOCKDIM_Y), d / (IG_BLOCKDIM_Z)); dim3 threads(IG_BLOCKDIM_X, IG_BLOCKDIM_Y, IG_BLOCKDIM_Z); hipLaunchKernelGGL(( initGridKernel), dim3(blocks), dim3(threads), 0, 0, d_grid, axis, w,h, d); getLastCudaError("Error: initGridKernel() kernel execution FAILED!"); } __global__ void DivideKernel ( float *d_dst, unsigned short *d_denom ) { const int idx = blockIdx.x; d_dst[idx] /= d_denom[idx]; } extern "C" void Divide ( float *d_dst, unsigned short *d_denom, int num_regions ) { dim3 blocks(num_regions, 1, 1); dim3 threads(1,1,1); hipLaunchKernelGGL(( DivideKernel), dim3(blocks), dim3(threads), 0, 0, d_dst, d_denom); getLastCudaError("Error: DivideKernel() kernel execution FAILED!"); } void RegionalSizeAndCentroid ( int *d_labels, float *d_grid, int *d_labels_tmp, float *d_grid_tmp, int *d_labels_reg, unsigned short *d_size_reg, float *d_grid_reg, void *d_cub_tmp, size_t cub_tmp_bytes, int *d_num_regions, int num_regions, int w, int h, int d ) { int image_size = w*h*d; size_t temp_storage_bytes = 0; float *d_grid_reg_el; for(int i = 0; i < 3; i++) { d_grid_reg_el = d_grid_reg + i*image_size; initGrid(d_grid, i, 
w,h,d); // Radix Sort temp_storage_bytes = 0; hipcub::DeviceRadixSort::SortPairs(NULL, temp_storage_bytes, d_labels, d_labels_tmp, d_grid, d_grid_tmp, image_size); assert(temp_storage_bytes < cub_tmp_bytes); hipcub::DeviceRadixSort::SortPairs(d_cub_tmp, temp_storage_bytes, d_labels, d_labels_tmp, d_grid, d_grid_tmp, image_size); // Reduce SumOp sum_op; temp_storage_bytes = 0; hipcub::DeviceReduce::ReduceByKey(NULL, temp_storage_bytes, d_labels_tmp, d_labels_reg, d_grid_tmp, d_grid_reg_el, d_num_regions, sum_op, image_size); assert(temp_storage_bytes < cub_tmp_bytes); hipcub::DeviceReduce::ReduceByKey(d_cub_tmp, temp_storage_bytes, d_labels_tmp, d_labels_reg, d_grid_tmp, d_grid_reg_el, d_num_regions, sum_op, image_size); } RegionalSize(d_labels_tmp, d_labels_reg, d_size_reg, d_cub_tmp, cub_tmp_bytes, d_num_regions, image_size); // Divide the sum by region size to get average for(int i = 0; i < 3; i++) { d_grid_reg_el = d_grid_reg + i*image_size; Divide(d_grid_reg_el, d_size_reg, num_regions); } } void AverageNormalized ( int *d_labels, float *d_norm, int *d_labels_tmp, float *d_norm_tmp, int *d_labels_reg, float *d_avenorm_reg, unsigned short *d_size_reg, void *d_cub_tmp, size_t cub_tmp_bytes, int *d_num_regions, int &h_num_regions, int w, int h, int d ) { int image_size = w*h*d; size_t temp_storage_bytes = 0; SumOp sum_op; // Sum Intensity for Normalized Image temp_storage_bytes = 0; hipcub::DeviceRadixSort::SortPairs(NULL, temp_storage_bytes, d_labels, d_labels_tmp, d_norm, d_norm_tmp, image_size); assert(temp_storage_bytes < cub_tmp_bytes); hipcub::DeviceRadixSort::SortPairs(d_cub_tmp, temp_storage_bytes, d_labels, d_labels_tmp, d_norm, d_norm_tmp, image_size); temp_storage_bytes = 0; hipcub::DeviceReduce::ReduceByKey(NULL, temp_storage_bytes, d_labels_tmp, d_labels_reg, d_norm_tmp, d_avenorm_reg, d_num_regions, sum_op, image_size); assert(temp_storage_bytes < cub_tmp_bytes); hipcub::DeviceReduce::ReduceByKey(d_cub_tmp, temp_storage_bytes, d_labels_tmp, d_labels_reg, d_norm_tmp, d_avenorm_reg, d_num_regions, sum_op, image_size); RegionalSize(d_labels_tmp, d_labels_reg, d_size_reg, d_cub_tmp, cub_tmp_bytes, d_num_regions, image_size); // get num_regions in host checkCudaErrors(hipMemcpy(&h_num_regions, d_num_regions, sizeof(int), hipMemcpyDeviceToHost)); std::cout << "num_regions: " << h_num_regions << std::endl; // Divide sum by size to get average Divide(d_avenorm_reg, d_size_reg, h_num_regions); } template<typename T> void _HessianFeatures ( int *d_labels, T *d_hessian, int *d_labels_tmp, T *d_hessian_tmp, int *d_labels_reg, T *d_hessian_reg, void *d_cub_tmp, size_t cub_tmp_bytes, int *d_num_regions, int w, int h, int d ) { int image_size = w*h*d; size_t temp_storage_bytes = 0; T *d_hessian_el; T *d_hessian_el_reg; // Hessian Element for(int i = 0 ; i < 6; i++) { // next element d_hessian_el = d_hessian + image_size*i; d_hessian_el_reg = d_hessian_reg + image_size*i; // Radix Sort temp_storage_bytes = 0; hipcub::DeviceRadixSort::SortPairs(NULL, temp_storage_bytes, d_labels, d_labels_tmp, d_hessian_el, d_hessian_tmp, image_size); // std::cout << "SortPairs@hessian " << temp_storage_bytes << " , " << cub_tmp_bytes << std::endl; assert(temp_storage_bytes < cub_tmp_bytes); hipcub::DeviceRadixSort::SortPairs(d_cub_tmp, temp_storage_bytes, d_labels, d_labels_tmp, d_hessian_el, d_hessian_tmp, image_size); // Regional Reduce SumOp sum_op; temp_storage_bytes = 0; hipcub::DeviceReduce::ReduceByKey(NULL, temp_storage_bytes, d_labels_tmp, d_labels_reg, d_hessian_tmp, d_hessian_el_reg, 
d_num_regions, sum_op, image_size); // std::cout << "ReduceByKey@hessian " << temp_storage_bytes << " , " << cub_tmp_bytes << std::endl; assert(temp_storage_bytes < cub_tmp_bytes); hipcub::DeviceReduce::ReduceByKey(d_cub_tmp, temp_storage_bytes, d_labels_tmp, d_labels_reg, d_hessian_tmp, d_hessian_el_reg, d_num_regions, sum_op, image_size); } } // explicit instantiation template void _HessianFeatures<float> ( int *d_labels, float *d_hessian, int *d_labels_tmp, float *d_hessian_tmp, int *d_labels_reg, float *d_hessian_reg, void *d_cub_tmp, size_t cub_tmp_bytes, int *d_num_regions, int w, int h, int d ); /*template void _HessianFeatures<half> ( int *d_labels, half *d_hessian, int *d_labels_tmp, half *d_hessian_tmp, int *d_labels_reg, half *d_hessian_reg, void *d_cub_tmp, size_t cub_tmp_bytes, int *d_num_regions, int w, int h, int d ); */
aa5e23c9d19abf811b5f518bbc1119ed8222ab64.cu
/* * RegionalFeatures.cu */ #include <assert.h> #include <cuda_runtime.h> #include <helper_functions.h> #include <helper_cuda.h> #include <cuda_fp16.h> #include <cub/cub.cuh> #include "RegionalFeatures.cuh" struct MaxOp { template <typename T> __device__ __forceinline__ T operator()(const T &a, const T &b) const { return (b > a) ? b : a; } }; struct MinOp { template <typename T> __device__ __forceinline__ T operator()(const T &a, const T &b) const { return (b < a) ? b : a; } }; struct SumOp { template <typename T> __device__ __forceinline__ T operator()(const T &a, const T &b) const { return a+b; } }; void MaxIntensity ( int *d_labels, unsigned short *d_img, int *d_labels_tmp, unsigned short *d_img_tmp, int *d_labels_reg, unsigned short *d_maxint_reg, void *d_cub_tmp, size_t cub_tmp_bytes, int *d_num_regions, int w, int h, int d ) { int image_size = w*h*d; size_t temp_storage_bytes = 0; MaxOp max_op; // Max Intensity for Original Image // Radix Sort cub::DeviceRadixSort::SortPairs(NULL, temp_storage_bytes, d_labels, d_labels_tmp, d_img, d_img_tmp, image_size); assert(temp_storage_bytes < cub_tmp_bytes); cub::DeviceRadixSort::SortPairs(d_cub_tmp, temp_storage_bytes, d_labels, d_labels_tmp, d_img, d_img_tmp, image_size); // Reduce temp_storage_bytes = 0; cub::DeviceReduce::ReduceByKey(NULL, temp_storage_bytes, d_labels_tmp, d_labels_reg, d_img_tmp, d_maxint_reg, d_num_regions, max_op, image_size); assert(temp_storage_bytes < cub_tmp_bytes); cub::DeviceReduce::ReduceByKey(d_cub_tmp, temp_storage_bytes, d_labels_tmp, d_labels_reg, d_img_tmp, d_maxint_reg, d_num_regions, max_op, image_size); } void MaxNormalized ( int *d_labels, float *d_norm, int *d_labels_tmp, float *d_norm_tmp, int *d_labels_reg, float *d_maxnorm_reg, void *d_cub_tmp, size_t cub_tmp_bytes, int *d_num_regions, int w, int h, int d ) { int image_size = w*h*d; size_t temp_storage_bytes = 0; MaxOp max_op; // Max Intensity for Normalized Image temp_storage_bytes = 0; cub::DeviceRadixSort::SortPairs(NULL, temp_storage_bytes, d_labels, d_labels_tmp, d_norm, d_norm_tmp, image_size); assert(temp_storage_bytes < cub_tmp_bytes); cub::DeviceRadixSort::SortPairs(d_cub_tmp, temp_storage_bytes, d_labels, d_labels_tmp, d_norm, d_norm_tmp, image_size); temp_storage_bytes = 0; cub::DeviceReduce::ReduceByKey(NULL, temp_storage_bytes, d_labels_tmp, d_labels_reg, d_norm_tmp, d_maxnorm_reg, d_num_regions, max_op, image_size); assert(temp_storage_bytes < cub_tmp_bytes); cub::DeviceReduce::ReduceByKey(d_cub_tmp, temp_storage_bytes, d_labels_tmp, d_labels_reg, d_norm_tmp, d_maxnorm_reg, d_num_regions, max_op, image_size); } void MinNormalized ( int *d_labels, float *d_norm, int *d_labels_tmp, float *d_norm_tmp, int *d_labels_reg, float *d_minnorm_reg, void *d_cub_tmp, size_t cub_tmp_bytes, int *d_num_regions, int w, int h, int d ) { int image_size = w*h*d; size_t temp_storage_bytes = 0; MinOp min_op; // Min Intensity for Normalized Image temp_storage_bytes = 0; cub::DeviceRadixSort::SortPairs(NULL, temp_storage_bytes, d_labels, d_labels_tmp, d_norm, d_norm_tmp, image_size); assert(temp_storage_bytes < cub_tmp_bytes); cub::DeviceRadixSort::SortPairs(d_cub_tmp, temp_storage_bytes, d_labels, d_labels_tmp, d_norm, d_norm_tmp, image_size); temp_storage_bytes = 0; cub::DeviceReduce::ReduceByKey(NULL, temp_storage_bytes, d_labels_tmp, d_labels_reg, d_norm_tmp, d_minnorm_reg, d_num_regions, min_op, image_size); assert(temp_storage_bytes < cub_tmp_bytes); cub::DeviceReduce::ReduceByKey(d_cub_tmp, temp_storage_bytes, d_labels_tmp, d_labels_reg, d_norm_tmp, 
d_minnorm_reg, d_num_regions, min_op, image_size); } void SumNormalized ( int *d_labels, float *d_norm, int *d_labels_tmp, float *d_norm_tmp, int *d_labels_reg, float *d_sumnorm_reg, void *d_cub_tmp, size_t cub_tmp_bytes, int *d_num_regions, int w, int h, int d ) { int image_size = w*h*d; size_t temp_storage_bytes = 0; SumOp sum_op; // Sum Intensity for Normalized Image temp_storage_bytes = 0; cub::DeviceRadixSort::SortPairs(NULL, temp_storage_bytes, d_labels, d_labels_tmp, d_norm, d_norm_tmp, image_size); assert(temp_storage_bytes < cub_tmp_bytes); cub::DeviceRadixSort::SortPairs(d_cub_tmp, temp_storage_bytes, d_labels, d_labels_tmp, d_norm, d_norm_tmp, image_size); temp_storage_bytes = 0; cub::DeviceReduce::ReduceByKey(NULL, temp_storage_bytes, d_labels_tmp, d_labels_reg, d_norm_tmp, d_sumnorm_reg, d_num_regions, sum_op, image_size); assert(temp_storage_bytes < cub_tmp_bytes); cub::DeviceReduce::ReduceByKey(d_cub_tmp, temp_storage_bytes, d_labels_tmp, d_labels_reg, d_norm_tmp, d_sumnorm_reg, d_num_regions, sum_op, image_size); } void RegionalSize ( int *d_labels_tmp, int *d_labels_reg, unsigned short *d_size_reg, void *d_cub_tmp, size_t cub_tmp_bytes, int *d_num_regions, int image_size ) { size_t temp_storage_bytes = 0; // RunLengthEncode cub::DeviceRunLengthEncode::Encode(NULL, temp_storage_bytes, d_labels_tmp, d_labels_reg, d_size_reg, d_num_regions, image_size); assert(temp_storage_bytes < cub_tmp_bytes); cub::DeviceRunLengthEncode::Encode(d_cub_tmp, temp_storage_bytes, d_labels_tmp, d_labels_reg, d_size_reg, d_num_regions, image_size); } #define IG_BLOCKDIM_X 8 #define IG_BLOCKDIM_Y 8 #define IG_BLOCKDIM_Z 8 __global__ void initGridKernel ( float *d_grid, int axis, int w, int h, int d ) { const int baseX = blockIdx.x * IG_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * IG_BLOCKDIM_Y + threadIdx.y; const int baseZ = blockIdx.z * IG_BLOCKDIM_Z + threadIdx.z; const int idx = (baseZ * h + baseY) * w + baseX; if(axis == 0) { d_grid[idx] = (float)baseX; } else if(axis == 1) { d_grid[idx] = (float)baseY; } else { d_grid[idx] = (float)baseZ; } } extern "C" void initGrid ( float *d_grid, int axis, int w, int h, int d ) { assert(w % (IG_BLOCKDIM_X) == 0); assert(h % (IG_BLOCKDIM_Y) == 0); assert(d % (IG_BLOCKDIM_Z) == 0); dim3 blocks(w / (IG_BLOCKDIM_X), h/(IG_BLOCKDIM_Y), d / (IG_BLOCKDIM_Z)); dim3 threads(IG_BLOCKDIM_X, IG_BLOCKDIM_Y, IG_BLOCKDIM_Z); initGridKernel<<<blocks, threads>>>(d_grid, axis, w,h, d); getLastCudaError("Error: initGridKernel() kernel execution FAILED!"); } __global__ void DivideKernel ( float *d_dst, unsigned short *d_denom ) { const int idx = blockIdx.x; d_dst[idx] /= d_denom[idx]; } extern "C" void Divide ( float *d_dst, unsigned short *d_denom, int num_regions ) { dim3 blocks(num_regions, 1, 1); dim3 threads(1,1,1); DivideKernel<<<blocks, threads>>>(d_dst, d_denom); getLastCudaError("Error: DivideKernel() kernel execution FAILED!"); } void RegionalSizeAndCentroid ( int *d_labels, float *d_grid, int *d_labels_tmp, float *d_grid_tmp, int *d_labels_reg, unsigned short *d_size_reg, float *d_grid_reg, void *d_cub_tmp, size_t cub_tmp_bytes, int *d_num_regions, int num_regions, int w, int h, int d ) { int image_size = w*h*d; size_t temp_storage_bytes = 0; float *d_grid_reg_el; for(int i = 0; i < 3; i++) { d_grid_reg_el = d_grid_reg + i*image_size; initGrid(d_grid, i, w,h,d); // Radix Sort temp_storage_bytes = 0; cub::DeviceRadixSort::SortPairs(NULL, temp_storage_bytes, d_labels, d_labels_tmp, d_grid, d_grid_tmp, image_size); assert(temp_storage_bytes < 
cub_tmp_bytes); cub::DeviceRadixSort::SortPairs(d_cub_tmp, temp_storage_bytes, d_labels, d_labels_tmp, d_grid, d_grid_tmp, image_size); // Reduce SumOp sum_op; temp_storage_bytes = 0; cub::DeviceReduce::ReduceByKey(NULL, temp_storage_bytes, d_labels_tmp, d_labels_reg, d_grid_tmp, d_grid_reg_el, d_num_regions, sum_op, image_size); assert(temp_storage_bytes < cub_tmp_bytes); cub::DeviceReduce::ReduceByKey(d_cub_tmp, temp_storage_bytes, d_labels_tmp, d_labels_reg, d_grid_tmp, d_grid_reg_el, d_num_regions, sum_op, image_size); } RegionalSize(d_labels_tmp, d_labels_reg, d_size_reg, d_cub_tmp, cub_tmp_bytes, d_num_regions, image_size); // Divide the sum by region size to get average for(int i = 0; i < 3; i++) { d_grid_reg_el = d_grid_reg + i*image_size; Divide(d_grid_reg_el, d_size_reg, num_regions); } } void AverageNormalized ( int *d_labels, float *d_norm, int *d_labels_tmp, float *d_norm_tmp, int *d_labels_reg, float *d_avenorm_reg, unsigned short *d_size_reg, void *d_cub_tmp, size_t cub_tmp_bytes, int *d_num_regions, int &h_num_regions, int w, int h, int d ) { int image_size = w*h*d; size_t temp_storage_bytes = 0; SumOp sum_op; // Sum Intensity for Normalized Image temp_storage_bytes = 0; cub::DeviceRadixSort::SortPairs(NULL, temp_storage_bytes, d_labels, d_labels_tmp, d_norm, d_norm_tmp, image_size); assert(temp_storage_bytes < cub_tmp_bytes); cub::DeviceRadixSort::SortPairs(d_cub_tmp, temp_storage_bytes, d_labels, d_labels_tmp, d_norm, d_norm_tmp, image_size); temp_storage_bytes = 0; cub::DeviceReduce::ReduceByKey(NULL, temp_storage_bytes, d_labels_tmp, d_labels_reg, d_norm_tmp, d_avenorm_reg, d_num_regions, sum_op, image_size); assert(temp_storage_bytes < cub_tmp_bytes); cub::DeviceReduce::ReduceByKey(d_cub_tmp, temp_storage_bytes, d_labels_tmp, d_labels_reg, d_norm_tmp, d_avenorm_reg, d_num_regions, sum_op, image_size); RegionalSize(d_labels_tmp, d_labels_reg, d_size_reg, d_cub_tmp, cub_tmp_bytes, d_num_regions, image_size); // get num_regions in host checkCudaErrors(cudaMemcpy(&h_num_regions, d_num_regions, sizeof(int), cudaMemcpyDeviceToHost)); std::cout << "num_regions: " << h_num_regions << std::endl; // Divide sum by size to get average Divide(d_avenorm_reg, d_size_reg, h_num_regions); } template<typename T> void _HessianFeatures ( int *d_labels, T *d_hessian, int *d_labels_tmp, T *d_hessian_tmp, int *d_labels_reg, T *d_hessian_reg, void *d_cub_tmp, size_t cub_tmp_bytes, int *d_num_regions, int w, int h, int d ) { int image_size = w*h*d; size_t temp_storage_bytes = 0; T *d_hessian_el; T *d_hessian_el_reg; // Hessian Element for(int i = 0 ; i < 6; i++) { // next element d_hessian_el = d_hessian + image_size*i; d_hessian_el_reg = d_hessian_reg + image_size*i; // Radix Sort temp_storage_bytes = 0; cub::DeviceRadixSort::SortPairs(NULL, temp_storage_bytes, d_labels, d_labels_tmp, d_hessian_el, d_hessian_tmp, image_size); // std::cout << "SortPairs@hessian " << temp_storage_bytes << " , " << cub_tmp_bytes << std::endl; assert(temp_storage_bytes < cub_tmp_bytes); cub::DeviceRadixSort::SortPairs(d_cub_tmp, temp_storage_bytes, d_labels, d_labels_tmp, d_hessian_el, d_hessian_tmp, image_size); // Regional Reduce SumOp sum_op; temp_storage_bytes = 0; cub::DeviceReduce::ReduceByKey(NULL, temp_storage_bytes, d_labels_tmp, d_labels_reg, d_hessian_tmp, d_hessian_el_reg, d_num_regions, sum_op, image_size); // std::cout << "ReduceByKey@hessian " << temp_storage_bytes << " , " << cub_tmp_bytes << std::endl; assert(temp_storage_bytes < cub_tmp_bytes); cub::DeviceReduce::ReduceByKey(d_cub_tmp, 
temp_storage_bytes, d_labels_tmp, d_labels_reg, d_hessian_tmp, d_hessian_el_reg, d_num_regions, sum_op, image_size); } } // explicit instantiation template void _HessianFeatures<float> ( int *d_labels, float *d_hessian, int *d_labels_tmp, float *d_hessian_tmp, int *d_labels_reg, float *d_hessian_reg, void *d_cub_tmp, size_t cub_tmp_bytes, int *d_num_regions, int w, int h, int d ); /*template void _HessianFeatures<half> ( int *d_labels, half *d_hessian, int *d_labels_tmp, half *d_hessian_tmp, int *d_labels_reg, half *d_hessian_reg, void *d_cub_tmp, size_t cub_tmp_bytes, int *d_num_regions, int w, int h, int d ); */
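Every helper in RegionalFeatures.cu above follows CUB's two-phase calling convention: the first call passes a NULL workspace pointer so CUB only writes the required byte count into temp_storage_bytes, and the second call with real storage performs the operation; the file preallocates a single scratch buffer (d_cub_tmp) and merely asserts that the queried size fits, avoiding a cudaMalloc per reduction. A minimal self-contained sketch of the same idiom for a plain sum — d_in and d_out are hypothetical buffers, not names from the file:

#include <cuda_runtime.h>
#include <cub/cub.cuh>

// Two-phase CUB pattern: query the workspace size, then run for real.
void SumReduceSketch(const float *d_in, float *d_out, int n)
{
    void *d_temp = NULL;
    size_t temp_bytes = 0;
    cub::DeviceReduce::Sum(d_temp, temp_bytes, d_in, d_out, n); // query only
    cudaMalloc(&d_temp, temp_bytes);
    cub::DeviceReduce::Sum(d_temp, temp_bytes, d_in, d_out, n); // actual reduction
    cudaFree(d_temp);
}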
93344aa0431571e776dc5f1253dc1740fc75d618.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "HugeCTR/include/regularizers/no_regularizer.hpp" #include "HugeCTR/include/utils.cuh" #include <utility> #ifndef NDEBUG #include <iostream> #endif namespace HugeCTR { template <typename T> NoRegularizer<T>::NoRegularizer(const std::shared_ptr<GeneralBuffer<float>>& weight_buff, const std::shared_ptr<GeneralBuffer<T>>& wgrad_buff, const int batch_size, const int device_id) : Regularizer<T>(weight_buff, wgrad_buff, batch_size, device_id) {} template <typename T> void NoRegularizer<T>::do_compute_rterm(const float* weight, float* rterm, int num_elements, hipStream_t stream) { *rterm = 0.0f; } template <typename T> void NoRegularizer<T>::do_initialize_wgrad(const float* weight, T* wgrad, int num_elements, hipStream_t stream) { int n_blocks = Regularizer<T>::get_n_sms() * 4; int block_size = 512; hipLaunchKernelGGL(( initialize_array), dim3(n_blocks), dim3(block_size), 0, stream, wgrad, num_elements, T(0.0f)); } template class NoRegularizer<__half>; template class NoRegularizer<float>; } // namespace HugeCTR
93344aa0431571e776dc5f1253dc1740fc75d618.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "HugeCTR/include/regularizers/no_regularizer.hpp" #include "HugeCTR/include/utils.cuh" #include <utility> #ifndef NDEBUG #include <iostream> #endif namespace HugeCTR { template <typename T> NoRegularizer<T>::NoRegularizer(const std::shared_ptr<GeneralBuffer<float>>& weight_buff, const std::shared_ptr<GeneralBuffer<T>>& wgrad_buff, const int batch_size, const int device_id) : Regularizer<T>(weight_buff, wgrad_buff, batch_size, device_id) {} template <typename T> void NoRegularizer<T>::do_compute_rterm(const float* weight, float* rterm, int num_elements, cudaStream_t stream) { *rterm = 0.0f; } template <typename T> void NoRegularizer<T>::do_initialize_wgrad(const float* weight, T* wgrad, int num_elements, cudaStream_t stream) { int n_blocks = Regularizer<T>::get_n_sms() * 4; int block_size = 512; initialize_array<<<n_blocks, block_size, 0, stream>>>(wgrad, num_elements, T(0.0f)); } template class NoRegularizer<__half>; template class NoRegularizer<float>; } // namespace HugeCTR
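The pair above illustrates hipify's mechanical launch rewrite: initialize_array<<<n_blocks, block_size, 0, stream>>>(...) in the .cu file becomes hipLaunchKernelGGL(( initialize_array), dim3(n_blocks), dim3(block_size), 0, stream, ...) in the .hip file. Since n_blocks is fixed at four blocks per SM regardless of num_elements, initialize_array (defined in HugeCTR's utils.cuh, not shown here) presumably covers the array with a grid-stride loop; a hedged sketch of that usual shape:

// Sketch only: the real initialize_array lives in utils.cuh. A grid size
// chosen independently of the element count implies a grid-stride loop.
template <typename T>
__global__ void initialize_array_sketch(T *arr, int num_elements, T value)
{
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_elements;
         i += blockDim.x * gridDim.x) {  // stride by the whole grid
        arr[i] = value;
    }
}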
4037a572aaabc9719fe2673d26c94aa7c6bd1384.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" static unsigned int GRID_SIZE_N; static unsigned int GRID_SIZE_4N; static unsigned int MAX_STATE_VALUE; __global__ static void cudaEvaluateRightGammaKernel(int *wptr, double *x1, double *x2, double *diagptable, double *output, const int limit) { const int i = blockIdx.x * blockDim.x + threadIdx.x; output[i] = 0.0; if (i >= limit) { return; } int j; double term = 0.0; x1 += 16 * i; x2 += 16 * i; #pragma unroll for (j = 0; j < 4; j++) { term += x1[0] * x2[0] * diagptable[0]; term += x1[1] * x2[1] * diagptable[1]; term += x1[2] * x2[2] * diagptable[2]; term += x1[3] * x2[3] * diagptable[3]; x1 += 4; x2 += 4; diagptable += 4; } term = log(0.25 * fabs(term)); output[i] += wptr[i] * term; }
4037a572aaabc9719fe2673d26c94aa7c6bd1384.cu
#include "includes.h" static unsigned int GRID_SIZE_N; static unsigned int GRID_SIZE_4N; static unsigned int MAX_STATE_VALUE; __global__ static void cudaEvaluateRightGammaKernel(int *wptr, double *x1, double *x2, double *diagptable, double *output, const int limit) { const int i = blockIdx.x * blockDim.x + threadIdx.x; output[i] = 0.0; if (i >= limit) { return; } int j; double term = 0.0; x1 += 16 * i; x2 += 16 * i; #pragma unroll for (j = 0; j < 4; j++) { term += x1[0] * x2[0] * diagptable[0]; term += x1[1] * x2[1] * diagptable[1]; term += x1[2] * x2[2] * diagptable[2]; term += x1[3] * x2[3] * diagptable[3]; x1 += 4; x2 += 4; diagptable += 4; } term = log(0.25 * fabs(term)); output[i] += wptr[i] * term; }
8340f2ffd2bc9e97654c4276a88daae6ca9eae79.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Author: Sardar Haque Email: [email protected] */ #include "../include/fastPolyEvaluation.h" using namespace std; void fastEvaluation(int k, sfixn p, sfixn *M1, sfixn *M2, sfixn *Fpoly, int check) { int i, j; /* creating subproduct tree*/ sfixn *Mgpu[MAX_LEVEL], *A, *B, *C, *D, *E, *F, *G; sfixn *MinvGpu[MAX_LEVEL]; int polyLengthCurrent = 2; int polyOnLayerCurrent = 1L << (k-2); int polyLengthNext, polyOnLayerNext; hipMalloc((void **)&Mgpu[0], sizeof(sfixn)*polyLengthCurrent*polyOnLayerCurrent); hipDeviceSynchronize(); hipMemcpy(Mgpu[0], M1, sizeof(sfixn)*polyLengthCurrent*polyOnLayerCurrent, hipMemcpyHostToDevice); hipDeviceSynchronize(); int threadsForAmul, mulInThreadBlock, blockNo; int L = 1L << (k-2); int l; sfixn w, winv, ninv; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); for(i = 1; i < (k-1); ++i) { if(i <= plainMulLimit) { // //cout<<i<<" "<<polyLengthCurrent<<" "<<polyOnLayerCurrent<<" "<<polyOnLayerCurrent*polyLengthCurrent; // polyOnLayerNext = polyOnLayerCurrent/2; polyLengthNext = 2*polyLengthCurrent -1; // //cout<<" "<<polyLengthNext<<" "<<polyOnLayerNext<<" "<<polyOnLayerNext*polyLengthNext<<endl; // threadsForAmul = 2*polyLengthCurrent; mulInThreadBlock = (int)floor((double)Tmul/(double)threadsForAmul); blockNo = (int)ceil( ((double) polyOnLayerCurrent/(double) mulInThreadBlock)*0.5 ); hipMalloc((void **)&Mgpu[i], sizeof(sfixn)*polyOnLayerNext*polyLengthNext); hipDeviceSynchronize(); hipLaunchKernelGGL(( listPlainMulGpu), dim3(blockNo), dim3(Tmul), 0, 0, Mgpu[i-1], Mgpu[i], polyLengthCurrent, polyOnLayerCurrent, threadsForAmul, mulInThreadBlock, p); hipDeviceSynchronize(); if(i == plainMulLimit) { hipMalloc((void **)&Mgpu[i+1], sizeof(sfixn)*(polyOnLayerNext)*(polyLengthNext-1)); hipDeviceSynchronize(); hipMalloc((void **)&MinvGpu[i], sizeof(sfixn)*(polyOnLayerNext)*(polyLengthNext-1)); hipDeviceSynchronize(); blockNo = (int)ceil((double)(polyOnLayerNext)/(double)(Tinv)); hipLaunchKernelGGL(( listPolyinv), dim3(blockNo), dim3(Tinv), 0, 0, Mgpu[i], MinvGpu[i], polyOnLayerNext, p); hipLaunchKernelGGL(( copyMgpu), dim3(polyOnLayerNext) ,dim3((polyLengthNext -1)), 0, 0, Mgpu[i+1], Mgpu[i], polyLengthNext); hipDeviceSynchronize(); hipFree(Mgpu[i]); Mgpu[i] = Mgpu[i+1]; } polyLengthCurrent = polyLengthNext; polyOnLayerCurrent = polyOnLayerNext; } else { l = 1L << (i-1); hipMalloc((void **)&A, sizeof(sfixn)*L); hipMalloc((void **)&B, sizeof(sfixn)*L); C = Mgpu[i-1]; hipMemcpy(A, C, sizeof(sfixn)*L, hipMemcpyDeviceToDevice); hipMemcpy(B, &(C[l]), sizeof(sfixn)*(L-l), hipMemcpyDeviceToDevice); hipLaunchKernelGGL(( zeroInbetween), dim3((int)ceil((double)L/(double)(Tmax*2))), dim3(Tmax), 0, 0, A, B, L/2, l ); // //cout<<i<<" "<<l<<" "<<L<<endl; //printGPUArray(C, L); //printGPUArray(A, L); //printGPUArray(B, L); // hipMalloc((void **)&Mgpu[i], sizeof(sfixn)*L); hipLaunchKernelGGL(( allZero), dim3((int)ceil((double)L/(double)Tmax)) , dim3(Tmax), 0, 0, Mgpu[i], L); // //printGPUArray(Mgpu[i], L); // hipLaunchKernelGGL(( pointAdd2), dim3((int)ceil((double)(L/2)/(double)Tmax)) , dim3(Tmax), 0, 0, Mgpu[i] , Mgpu[i-1], l, L/2, p); // //printGPUArray(Mgpu[i], L); // w = primitive_root(i, p); l = 1L << (k-i -2); // //cout<<i<<" "<<w<<" "<<l<<endl; // list_stockham_dev(A, l, i, w, p); list_stockham_dev(B, l, i, w, p); hipLaunchKernelGGL(( pointMul), dim3((int)ceil((double)L/(double)Tmax)) , dim3(Tmax), 0, 0, A, B, L, p); winv = inv_mod(w, p); 
list_stockham_dev(A, l, i, winv, p); w = (1L << i); ninv = inv_mod(w, p); hipLaunchKernelGGL(( scalarMul), dim3((int)ceil((double)L/(double)Tmax)) , dim3(Tmax), 0, 0, A, ninv, L, p); hipLaunchKernelGGL(( pointAdd), dim3((int)ceil((double)L/(double)Tmax)) , dim3(Tmax), 0, 0, Mgpu[i], A, L, p); hipFree(A); hipFree(B); } } hipEventRecord(stop, 0); hipEventSynchronize(stop); float outerTime; hipEventElapsedTime(&outerTime, start, stop); cout<<outerTime/1000.0<<" seconds for subproductree and the subInversetree of plain arithmatic level."<<endl; /* creating subinverse tree*/ hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); L = 1L << (k-2); //for(i = plainMulLimit; i < (k-2); ++i) for(i = plainMulLimit; i < (k-2); ++i) { l = 1L << (i); j = 1L << (k-2-i); hipMalloc((void **)&A, sizeof(sfixn)*L); blockNo = (int)ceil((double)(L)/(double)(Tmax)); hipLaunchKernelGGL(( listReversePoly), dim3(blockNo), dim3(Tmax), 0, 0, A, MinvGpu[i], l , j); hipMalloc((void **)&B, sizeof(sfixn)*L); hipLaunchKernelGGL(( listCpLdZeroPoly), dim3(blockNo), dim3(Tmax), 0, 0, B, A ,l , j); hipMalloc((void **)&C, sizeof(sfixn)*L); hipMemcpy(C, Mgpu[i], sizeof(sfixn)*L, hipMemcpyDeviceToDevice); // //cout<<"creating subInverse tree at: "<<i+1<<" the number of poly is: "<<j<<endl; //cout<<"Reverse of Minv["<<i<<"]: "<<endl; //printGPUArray(A, L); //cout<<"Reverse of Minv["<<i<<"] excluding leading coefficient: "<<endl; //printGPUArray(B, L); //cout<<"M["<<i<<"]: "<<endl; //printGPUArray(C, L); // w = primitive_root(i, p); // //cout<<"2^i th root of unity is: "<<w<<endl; // list_stockham_dev(A, j, i, w, p); list_stockham_dev(C, j, i, w, p); hipLaunchKernelGGL(( pointMul), dim3((int)ceil((double)L/(double)Tmax)) , dim3(Tmax), 0, 0, A, C, L, p); winv = inv_mod(w, p); list_stockham_dev(A, j, i, winv, p); ninv = inv_mod(l, p); hipLaunchKernelGGL(( scalarMul), dim3((int)ceil((double)L/(double)Tmax)) , dim3(Tmax), 0, 0, A, ninv, L, p); hipLaunchKernelGGL(( pointAdd), dim3((int)ceil((double)L/(double)Tmax)) , dim3(Tmax), 0, 0, A, B, L, p); blockNo = (int)ceil((double)(L)/(double)(Tmax)); hipLaunchKernelGGL(( listReversePoly), dim3(blockNo), dim3(Tmax), 0, 0, B, A, l , j); hipLaunchKernelGGL(( allNeg), dim3(blockNo), dim3(Tmax) , 0, 0, B, L, p); // //cout<<"T:"<<endl; //printGPUArray(B, L); // hipMalloc((void **)&D, sizeof(sfixn)*L*2); hipMalloc((void **)&E, sizeof(sfixn)*L*2); hipMalloc((void **)&F, sizeof(sfixn)*L*2); hipMalloc((void **)&G, sizeof(sfixn)*L*2); blockNo = (int)ceil((double)(L*2)/(double)(Tmax)); hipLaunchKernelGGL(( listPolyDegInc), dim3(blockNo), dim3(Tmax), 0, 0, MinvGpu[i], D, l, j, 2*l); hipLaunchKernelGGL(( listPolyDegInc), dim3(blockNo), dim3(Tmax), 0, 0, B, E, l, j, 2*l); hipMemcpy(F, D, sizeof(sfixn)*L*2, hipMemcpyDeviceToDevice); // //cout<<"Minv["<<i<<"] padded by zeros:"<<endl; //printGPUArray(D, 2*L); //cout<<"T padded by zeros"<<endl; //printGPUArray(E, 2*L); //cout<<"Minv["<<i<<"] padded by zeros"<<endl; //printGPUArray(F, 2*L); // w = primitive_root(i+1, p); list_stockham_dev(E, j, i+1, w, p); list_stockham_dev(D, j, i+1, w, p); hipLaunchKernelGGL(( pointMul), dim3((int)ceil((double)(L*2)/(double)Tmax)) , dim3(Tmax), 0, 0, E, D, 2*L, p); winv = inv_mod(w, p); list_stockham_dev(E, j, i+1, winv, p); ninv = inv_mod(l*2, p); hipLaunchKernelGGL(( scalarMul), dim3((int)ceil((double)(L*2)/(double)Tmax)) , dim3(Tmax), 0, 0, E, ninv, L*2, p); hipLaunchKernelGGL(( listCpUpperCuda), dim3((int)ceil((double)(L)/(double)Tmax)) , dim3(Tmax), 0, 0, F, E, L, l); // //cout<<"Minv["<<i<<"] with one 
step Newton iteration:"<<endl; //printGPUArray(F, 2*L); // hipMemcpy(G, &(F[2*l]), sizeof(sfixn)*(2*L-2*l), hipMemcpyDeviceToDevice); hipLaunchKernelGGL(( zeroInbetween), dim3((int)ceil((double)L/(double)(Tmax))), dim3(Tmax), 0, 0, F, G, L, 2*l ); j = j/2; w = primitive_root(i+2, p); list_stockham_dev(F, j, i+2, w, p); list_stockham_dev(G, j, i+2, w, p); hipLaunchKernelGGL(( pointMul), dim3((int)ceil((double)(L*2)/(double)Tmax)) , dim3(Tmax), 0, 0, F, G, L*2, p); winv = inv_mod(w, p); list_stockham_dev(F, j, i+2, winv, p); ninv = inv_mod(l*4, p); hipLaunchKernelGGL(( scalarMul), dim3((int)ceil((double)(L*2)/(double)Tmax)) , dim3(Tmax), 0, 0, F, ninv, 2*L, p); // //printGPUArray(F, 2*L); // hipMalloc((void **)&MinvGpu[i+1], sizeof(sfixn)*L); hipLaunchKernelGGL(( listCpLowerCuda), dim3((int)ceil((double)(L)/(double)Tmax)) , dim3(Tmax), 0, 0, MinvGpu[i+1], F, L, 2*l); hipDeviceSynchronize(); // //printGPUArray(MinvGpu[i+1], L); // hipFree(A); hipFree(B); hipFree(C); hipFree(D); hipFree(E); hipFree(F); hipFree(G); } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&outerTime, start, stop); cout<<outerTime/1000.0<<" seconds for the subInversetree of FFT level."<<endl; /* fast evaluation */ sfixn *FpolyGpu[MAX_LEVEL]; hipMalloc((void **)&FpolyGpu[k-1], sizeof(sfixn)*(1L << (k-1))); hipMemcpy(FpolyGpu[k-1], Fpoly, sizeof(sfixn)*(1L << (k-1)), hipMemcpyHostToDevice); hipMalloc((void **)&FpolyGpu[k-2], sizeof(sfixn)*(1L << (k-2))); int lm, linv, lf; sfixn *H, *I, *J; //for(i = k-2; i >= 0; --i) //{ i = k-2; j = 1L << (k-2-i); lm = 1L << (i); linv = lm; lf = 1L << (i+1); // printGPUArray(FpolyGpu[k-1], lf); // hipMalloc((void **)&H, sizeof(sfixn)*(lf)); blockNo = (int)ceil((double)(lf*j)/(double)(Tmax)); hipLaunchKernelGGL(( listReversePoly), dim3(blockNo), dim3(Tmax), 0, 0, H, FpolyGpu[k-1], lf , j); // printGPUArray(H, lf); // hipMalloc((void **)&I, sizeof(sfixn)*(lf)); hipMemcpy(I, &(H[lm]), sizeof(sfixn)*(lm), hipMemcpyDeviceToDevice); hipLaunchKernelGGL(( zeroInbetween), dim3((int)ceil((double)lf/(double)(Tmax*2))), dim3(Tmax), 0, 0, H, I, lf/2, lm ); hipMalloc((void **)&J, sizeof(sfixn)*(lf)); blockNo = (int)ceil((double)(lf *j)/(double)(Tmax)); hipLaunchKernelGGL(( listPolyDegInc), dim3(blockNo), dim3(Tmax), 0, 0, MinvGpu[i], J, linv, j, lf); // printGPUArray(H, lf); printGPUArray(I, lf); printGPUArray(J, lf); // w = primitive_root(i+1, p); list_stockham_dev(H, j, i+1, w, p); list_stockham_dev(I, j, i+1, w, p); list_stockham_dev(J, j, i+1, w, p); hipLaunchKernelGGL(( pointMul), dim3((int)ceil((double)(lf)/(double)Tmax)) , dim3(Tmax), 0, 0, H, J, lf, p); hipLaunchKernelGGL(( pointMul), dim3((int)ceil((double)(lf)/(double)Tmax)) , dim3(Tmax), 0, 0, I, J, lf, p); winv = inv_mod(w, p); list_stockham_dev(H, j, i+1, winv, p); list_stockham_dev(I, j, i+1, winv, p); ninv = inv_mod(lf, p); hipLaunchKernelGGL(( scalarMul), dim3((int)ceil((double)(lf)/(double)Tmax)) , dim3(Tmax), 0, 0, H, ninv, lf, p); hipLaunchKernelGGL(( scalarMul), dim3((int)ceil((double)(lf)/(double)Tmax)) , dim3(Tmax), 0, 0, I, ninv, lf, p); // printGPUArray(H, lf); printGPUArray(I, lf); // hipLaunchKernelGGL(( list2wayCp), dim3((int)ceil((double)(linv )/(double)Tmax)) , dim3(Tmax), 0, 0, H, I, linv, linv, p); // printGPUArray(H, lf); // hipFree(H); hipFree(I); hipFree(J); //} struct status trees; /* Copy the subproduct tree */ for(i = 0; i < (k-1) ; ++i) { polyLengthCurrent = 1 << (i); polyOnLayerCurrent = 1 << (k-i-2); if(i < plainMulLimit) ++polyLengthCurrent; trees.M[i] = new sfixn 
[polyLengthCurrent*polyOnLayerCurrent]; hipMemcpy(trees.M[i] , Mgpu[i], sizeof(sfixn)*polyLengthCurrent*polyOnLayerCurrent, hipMemcpyDeviceToHost); hipDeviceSynchronize(); hipFree(Mgpu[i]); // //cout<<"copying subproduct tree: "<<i<<endl; // } for(i = plainMulLimit; i < (k-1) ; ++i) { polyLengthCurrent = 1 << (i); polyOnLayerCurrent = 1 << (k-i-2); // //cout<<"copying subinverse tree: "<<i<<endl; // trees.InvM[i] = new sfixn [polyLengthCurrent*polyOnLayerCurrent]; hipMemcpy(trees.InvM[i] , MinvGpu[i], sizeof(sfixn)*polyLengthCurrent*polyOnLayerCurrent, hipMemcpyDeviceToHost); hipDeviceSynchronize(); hipFree(MinvGpu[i]); } return trees; }
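A reading of the subinverse-tree loop above, stated as an interpretation rather than text from the source: each level first lifts a child inverse from precision l to 2l with one Newton step, I_{2l} = I_l*(2 - M*I_l) mod x^{2l}. The T polynomial assembled via listReversePoly/allNeg is the negated correction term, and listCpUpperCuda writes it into the upper l coefficients only, the lower half being unchanged because M*I_l = 1 mod x^l already holds. The final FFT round of size 2^{i+2} (after j = j/2) then multiplies sibling inverses pairwise, which yields the parent inverse since rev(M_left*M_right) = rev(M_left)*rev(M_right) and the inverse of a product is the product of the inverses mod x^{2l}. The reversals appear throughout because Newton inversion requires a unit constant term, which rev(M) of a monic M always has.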
8340f2ffd2bc9e97654c4276a88daae6ca9eae79.cu
/* Author: Sardar Haque Email: [email protected] */ #include "../include/fastPolyEvaluation.h" using namespace std; struct status fastEvaluation(int k, sfixn p, sfixn *M1, sfixn *M2, sfixn *Fpoly, int check) { int i, j; /* creating subproduct tree*/ sfixn *Mgpu[MAX_LEVEL], *A, *B, *C, *D, *E, *F, *G; sfixn *MinvGpu[MAX_LEVEL]; int polyLengthCurrent = 2; int polyOnLayerCurrent = 1L << (k-2); int polyLengthNext, polyOnLayerNext; cudaMalloc((void **)&Mgpu[0], sizeof(sfixn)*polyLengthCurrent*polyOnLayerCurrent); cudaThreadSynchronize(); cudaMemcpy(Mgpu[0], M1, sizeof(sfixn)*polyLengthCurrent*polyOnLayerCurrent, cudaMemcpyHostToDevice); cudaThreadSynchronize(); int threadsForAmul, mulInThreadBlock, blockNo; int L = 1L << (k-2); int l; sfixn w, winv, ninv; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); for(i = 1; i < (k-1); ++i) { if(i <= plainMulLimit) { // //cout<<i<<" "<<polyLengthCurrent<<" "<<polyOnLayerCurrent<<" "<<polyOnLayerCurrent*polyLengthCurrent; // polyOnLayerNext = polyOnLayerCurrent/2; polyLengthNext = 2*polyLengthCurrent -1; // //cout<<" "<<polyLengthNext<<" "<<polyOnLayerNext<<" "<<polyOnLayerNext*polyLengthNext<<endl; // threadsForAmul = 2*polyLengthCurrent; mulInThreadBlock = (int)floor((double)Tmul/(double)threadsForAmul); blockNo = (int)ceil( ((double) polyOnLayerCurrent/(double) mulInThreadBlock)*0.5 ); cudaMalloc((void **)&Mgpu[i], sizeof(sfixn)*polyOnLayerNext*polyLengthNext); cudaThreadSynchronize(); listPlainMulGpu<<<blockNo, Tmul>>>(Mgpu[i-1], Mgpu[i], polyLengthCurrent, polyOnLayerCurrent, threadsForAmul, mulInThreadBlock, p); cudaThreadSynchronize(); if(i == plainMulLimit) { cudaMalloc((void **)&Mgpu[i+1], sizeof(sfixn)*(polyOnLayerNext)*(polyLengthNext-1)); cudaThreadSynchronize(); cudaMalloc((void **)&MinvGpu[i], sizeof(sfixn)*(polyOnLayerNext)*(polyLengthNext-1)); cudaThreadSynchronize(); blockNo = (int)ceil((double)(polyOnLayerNext)/(double)(Tinv)); listPolyinv<<<blockNo, Tinv>>>(Mgpu[i], MinvGpu[i], polyOnLayerNext, p); copyMgpu<<<polyOnLayerNext ,(polyLengthNext -1)>>>(Mgpu[i+1], Mgpu[i], polyLengthNext); cudaThreadSynchronize(); cudaFree(Mgpu[i]); Mgpu[i] = Mgpu[i+1]; } polyLengthCurrent = polyLengthNext; polyOnLayerCurrent = polyOnLayerNext; } else { l = 1L << (i-1); cudaMalloc((void **)&A, sizeof(sfixn)*L); cudaMalloc((void **)&B, sizeof(sfixn)*L); C = Mgpu[i-1]; cudaMemcpy(A, C, sizeof(sfixn)*L, cudaMemcpyDeviceToDevice); cudaMemcpy(B, &(C[l]), sizeof(sfixn)*(L-l), cudaMemcpyDeviceToDevice); zeroInbetween<<<(int)ceil((double)L/(double)(Tmax*2)), Tmax>>>(A, B, L/2, l ); // //cout<<i<<" "<<l<<" "<<L<<endl; //printGPUArray(C, L); //printGPUArray(A, L); //printGPUArray(B, L); // cudaMalloc((void **)&Mgpu[i], sizeof(sfixn)*L); allZero<<<(int)ceil((double)L/(double)Tmax) , Tmax>>>(Mgpu[i], L); // //printGPUArray(Mgpu[i], L); // pointAdd2<<<(int)ceil((double)(L/2)/(double)Tmax) , Tmax>>>(Mgpu[i] , Mgpu[i-1], l, L/2, p); // //printGPUArray(Mgpu[i], L); // w = primitive_root(i, p); l = 1L << (k-i -2); // //cout<<i<<" "<<w<<" "<<l<<endl; // list_stockham_dev(A, l, i, w, p); list_stockham_dev(B, l, i, w, p); pointMul<<<(int)ceil((double)L/(double)Tmax) , Tmax>>>( A, B, L, p); winv = inv_mod(w, p); list_stockham_dev(A, l, i, winv, p); w = (1L << i); ninv = inv_mod(w, p); scalarMul<<<(int)ceil((double)L/(double)Tmax) , Tmax>>>( A, ninv, L, p); pointAdd<<<(int)ceil((double)L/(double)Tmax) , Tmax>>>( Mgpu[i], A, L, p); cudaFree(A); cudaFree(B); } } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float outerTime; 
cudaEventElapsedTime(&outerTime, start, stop); cout<<outerTime/1000.0<<" seconds for subproductree and the subInversetree of plain arithmatic level."<<endl; /* creating subinverse tree*/ cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); L = 1L << (k-2); //for(i = plainMulLimit; i < (k-2); ++i) for(i = plainMulLimit; i < (k-2); ++i) { l = 1L << (i); j = 1L << (k-2-i); cudaMalloc((void **)&A, sizeof(sfixn)*L); blockNo = (int)ceil((double)(L)/(double)(Tmax)); listReversePoly<<<blockNo, Tmax>>>(A, MinvGpu[i], l , j); cudaMalloc((void **)&B, sizeof(sfixn)*L); listCpLdZeroPoly<<<blockNo, Tmax>>>(B, A ,l , j); cudaMalloc((void **)&C, sizeof(sfixn)*L); cudaMemcpy(C, Mgpu[i], sizeof(sfixn)*L, cudaMemcpyDeviceToDevice); // //cout<<"creating subInverse tree at: "<<i+1<<" the number of poly is: "<<j<<endl; //cout<<"Reverse of Minv["<<i<<"]: "<<endl; //printGPUArray(A, L); //cout<<"Reverse of Minv["<<i<<"] excluding leading coefficient: "<<endl; //printGPUArray(B, L); //cout<<"M["<<i<<"]: "<<endl; //printGPUArray(C, L); // w = primitive_root(i, p); // //cout<<"2^i th root of unity is: "<<w<<endl; // list_stockham_dev(A, j, i, w, p); list_stockham_dev(C, j, i, w, p); pointMul<<<(int)ceil((double)L/(double)Tmax) , Tmax>>>( A, C, L, p); winv = inv_mod(w, p); list_stockham_dev(A, j, i, winv, p); ninv = inv_mod(l, p); scalarMul<<<(int)ceil((double)L/(double)Tmax) , Tmax>>>( A, ninv, L, p); pointAdd<<<(int)ceil((double)L/(double)Tmax) , Tmax>>>( A, B, L, p); blockNo = (int)ceil((double)(L)/(double)(Tmax)); listReversePoly<<<blockNo, Tmax>>>(B, A, l , j); allNeg<<<blockNo, Tmax >>>(B, L, p); // //cout<<"T:"<<endl; //printGPUArray(B, L); // cudaMalloc((void **)&D, sizeof(sfixn)*L*2); cudaMalloc((void **)&E, sizeof(sfixn)*L*2); cudaMalloc((void **)&F, sizeof(sfixn)*L*2); cudaMalloc((void **)&G, sizeof(sfixn)*L*2); blockNo = (int)ceil((double)(L*2)/(double)(Tmax)); listPolyDegInc<<<blockNo, Tmax>>>(MinvGpu[i], D, l, j, 2*l); listPolyDegInc<<<blockNo, Tmax>>>(B, E, l, j, 2*l); cudaMemcpy(F, D, sizeof(sfixn)*L*2, cudaMemcpyDeviceToDevice); // //cout<<"Minv["<<i<<"] padded by zeros:"<<endl; //printGPUArray(D, 2*L); //cout<<"T padded by zeros"<<endl; //printGPUArray(E, 2*L); //cout<<"Minv["<<i<<"] padded by zeros"<<endl; //printGPUArray(F, 2*L); // w = primitive_root(i+1, p); list_stockham_dev(E, j, i+1, w, p); list_stockham_dev(D, j, i+1, w, p); pointMul<<<(int)ceil((double)(L*2)/(double)Tmax) , Tmax>>>( E, D, 2*L, p); winv = inv_mod(w, p); list_stockham_dev(E, j, i+1, winv, p); ninv = inv_mod(l*2, p); scalarMul<<<(int)ceil((double)(L*2)/(double)Tmax) , Tmax>>>( E, ninv, L*2, p); listCpUpperCuda<<<(int)ceil((double)(L)/(double)Tmax) , Tmax>>>( F, E, L, l); // //cout<<"Minv["<<i<<"] with one step Newton iteration:"<<endl; //printGPUArray(F, 2*L); // cudaMemcpy(G, &(F[2*l]), sizeof(sfixn)*(2*L-2*l), cudaMemcpyDeviceToDevice); zeroInbetween<<<(int)ceil((double)L/(double)(Tmax)), Tmax>>>(F, G, L, 2*l ); j = j/2; w = primitive_root(i+2, p); list_stockham_dev(F, j, i+2, w, p); list_stockham_dev(G, j, i+2, w, p); pointMul<<<(int)ceil((double)(L*2)/(double)Tmax) , Tmax>>>( F, G, L*2, p); winv = inv_mod(w, p); list_stockham_dev(F, j, i+2, winv, p); ninv = inv_mod(l*4, p); scalarMul<<<(int)ceil((double)(L*2)/(double)Tmax) , Tmax>>>( F, ninv, 2*L, p); // //printGPUArray(F, 2*L); // cudaMalloc((void **)&MinvGpu[i+1], sizeof(sfixn)*L); listCpLowerCuda<<<(int)ceil((double)(L)/(double)Tmax) , Tmax>>>( MinvGpu[i+1], F, L, 2*l); cudaThreadSynchronize(); // //printGPUArray(MinvGpu[i+1], L); // cudaFree(A); 
cudaFree(B); cudaFree(C); cudaFree(D); cudaFree(E); cudaFree(F); cudaFree(G); } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&outerTime, start, stop); cout<<outerTime/1000.0<<" seconds for the subInversetree of FFT level."<<endl; /* fast evaluation */ sfixn *FpolyGpu[MAX_LEVEL]; cudaMalloc((void **)&FpolyGpu[k-1], sizeof(sfixn)*(1L << (k-1))); cudaMemcpy(FpolyGpu[k-1], Fpoly, sizeof(sfixn)*(1L << (k-1)), cudaMemcpyHostToDevice); cudaMalloc((void **)&FpolyGpu[k-2], sizeof(sfixn)*(1L << (k-2))); int lm, linv, lf; sfixn *H, *I, *J; //for(i = k-2; i >= 0; --i) //{ i = k-2; j = 1L << (k-2-i); lm = 1L << (i); linv = lm; lf = 1L << (i+1); // printGPUArray(FpolyGpu[k-1], lf); // cudaMalloc((void **)&H, sizeof(sfixn)*(lf)); blockNo = (int)ceil((double)(lf*j)/(double)(Tmax)); listReversePoly<<<blockNo, Tmax>>>(H, FpolyGpu[k-1], lf , j); // printGPUArray(H, lf); // cudaMalloc((void **)&I, sizeof(sfixn)*(lf)); cudaMemcpy(I, &(H[lm]), sizeof(sfixn)*(lm), cudaMemcpyDeviceToDevice); zeroInbetween<<<(int)ceil((double)lf/(double)(Tmax*2)), Tmax>>>(H, I, lf/2, lm ); cudaMalloc((void **)&J, sizeof(sfixn)*(lf)); blockNo = (int)ceil((double)(lf *j)/(double)(Tmax)); listPolyDegInc<<<blockNo, Tmax>>>(MinvGpu[i], J, linv, j, lf); // printGPUArray(H, lf); printGPUArray(I, lf); printGPUArray(J, lf); // w = primitive_root(i+1, p); list_stockham_dev(H, j, i+1, w, p); list_stockham_dev(I, j, i+1, w, p); list_stockham_dev(J, j, i+1, w, p); pointMul<<<(int)ceil((double)(lf)/(double)Tmax) , Tmax>>>( H, J, lf, p); pointMul<<<(int)ceil((double)(lf)/(double)Tmax) , Tmax>>>( I, J, lf, p); winv = inv_mod(w, p); list_stockham_dev(H, j, i+1, winv, p); list_stockham_dev(I, j, i+1, winv, p); ninv = inv_mod(lf, p); scalarMul<<<(int)ceil((double)(lf)/(double)Tmax) , Tmax>>>( H, ninv, lf, p); scalarMul<<<(int)ceil((double)(lf)/(double)Tmax) , Tmax>>>( I, ninv, lf, p); // printGPUArray(H, lf); printGPUArray(I, lf); // list2wayCp<<<(int)ceil((double)(linv )/(double)Tmax) , Tmax>>>( H, I, linv, linv, p); // printGPUArray(H, lf); // cudaFree(H); cudaFree(I); cudaFree(J); //} struct status trees; /* Copy the subproduct tree */ for(i = 0; i < (k-1) ; ++i) { polyLengthCurrent = 1 << (i); polyOnLayerCurrent = 1 << (k-i-2); if(i < plainMulLimit) ++polyLengthCurrent; trees.M[i] = new sfixn [polyLengthCurrent*polyOnLayerCurrent]; cudaMemcpy(trees.M[i] , Mgpu[i], sizeof(sfixn)*polyLengthCurrent*polyOnLayerCurrent, cudaMemcpyDeviceToHost); cudaThreadSynchronize(); cudaFree(Mgpu[i]); // //cout<<"copying subproduct tree: "<<i<<endl; // } for(i = plainMulLimit; i < (k-1) ; ++i) { polyLengthCurrent = 1 << (i); polyOnLayerCurrent = 1 << (k-i-2); // //cout<<"copying subinverse tree: "<<i<<endl; // trees.InvM[i] = new sfixn [polyLengthCurrent*polyOnLayerCurrent]; cudaMemcpy(trees.InvM[i] , MinvGpu[i], sizeof(sfixn)*polyLengthCurrent*polyOnLayerCurrent, cudaMemcpyDeviceToHost); cudaThreadSynchronize(); cudaFree(MinvGpu[i]); } return trees; }
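Both versions of fastEvaluation lean on small elementwise kernels (pointMul, pointAdd, scalarMul, zeroInbetween) defined elsewhere in the library. For orientation, a hedged sketch of what an elementwise product over Z_p typically looks like — plain int stands in for the library's sfixn typedef, and the 64-bit intermediate guards against overflow before the reduction mod p:

// Illustrative only; the real pointMul is defined elsewhere in the library.
__global__ void pointMulSketch(int *A, const int *B, int n, int p)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        long long prod = (long long)A[i] * (long long)B[i]; // 64-bit product
        A[i] = (int)(prod % p);                             // reduce mod p
    }
}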
a8fc66edd292ca84fa8d27394b7dbd0c7e68ccb1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // This file is part of bp-layers. // // Copyright (C) 2020 Patrick Knbelreiter <knoebelreiter at icg dot tugraz dot at> // Christian Sormann <christian dot sormann at icg dot tugraz dot at> // Institute for Computer Graphics and Vision, Graz University of Technology // https://www.tugraz.at/institute/icg/teams/team-pock/ // // bp-layers is free software: you can redistribute it and/or modify it under the // terms of the GNU Affero General Public License as published by the Free Software // Foundation, either version 3 of the License, or any later version. // // bp-layers is distributed in the hope that it will be useful, but WITHOUT ANY // WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS // FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. // // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. #include "../../include/error_util.h" #include "lbp_min_sum_kernel.cuh" #include "util.cuh" // ============================================================================ // CUDA KERNELS // ============================================================================ __global__ void lbp_cuda_forward_kernel_reduction_min_sum( KernelData cost, KernelData5 jump, KernelData edges, KernelData5 messages, KernelData5 messages_argmin, KernelData message_scale, const unsigned short x_in, const unsigned short direction, int shared_mem_offset, unsigned short delta) { unsigned short y = blockIdx.x * blockDim.x + threadIdx.x; const unsigned short c = blockIdx.y * blockDim.y + threadIdx.y; unsigned short x = 0; if(direction == UP || direction == DOWN) { x = y; y = x_in; } else { x = x_in; // y = y; } // shared memory h extern __shared__ float sdata[]; // message size is N x 4 x H x W x C // cost size is N x H x W x C // edges: N x 1 x H x W // jumps: 1 x 1 x H x W const short N = cost.size0; const short H = cost.size1; const short W = cost.size2; const short C = cost.size3; const unsigned int tid = threadIdx.y + blockDim.y * threadIdx.x; const float max_float = 1e15; // check inside image if(c >= C || x >= W || y >= H) { // write large number that will never win sdata[tid] = max_float; return; } unsigned int n = 0; float L2 = jump(direction, y, x, 0, jump.size4 - 1) ; unsigned short start = max(c - delta + 1, 0); unsigned short stop = min(c + delta - 1, C - 1); float edgeWeight = edges(n, direction, y, x); // write to shared memory // compute message for every label sdata[tid] = cost(n, y, x, c); // add costs from all neighbors if(direction != RIGHT) { sdata[tid] += messages(n, RIGHT, y, x, c); } if(direction != LEFT) { sdata[tid] += messages(n, LEFT, y, x, c); } if(direction != UP) { sdata[tid] += messages(n, UP, y, x, c); } if(direction != DOWN) { sdata[tid] += messages(n, DOWN, y, x, c); } float h = sdata[tid]; __syncthreads(); // save h in shared mem sdata[tid] = h; sdata[tid + shared_mem_offset] = static_cast<float>(c); __syncthreads(); // if delta is larger or equal than this threshold use old version as it is a little faster int old_version_threshold = C; float msg = 0.0; int msg_argmin = 0; // if there is no truncation use old version if(delta >= old_version_threshold) { //OLD VERSION ///////////////////// sdata[tid] = h; __syncthreads(); msg = max_float; //minVal + jump(0, 0, 0, jump.size3 - 1) * edgeWeight; msg_argmin = 0; for(unsigned short label = 0; label < C; 
++label) { // compute min in local var to avoid global mem writes float new_msg = sdata[label + blockDim.y * threadIdx.x] + jump(direction, y, x, label, c) * edgeWeight; msg = fminf(msg, new_msg); if(msg == new_msg) { msg_argmin = label; } } __syncthreads(); ///////////////// } else { //TRUNC SPEED UP VERSION /////////////////////////////////// for(unsigned int s=blockDim.y / 2; s > 0; s>>=1) { if(tid - (threadIdx.x * blockDim.y) < s && tid + s < (threadIdx.x * blockDim.y) + C) { //min parallel reduction float min_val = sdata[tid]; float min_label = sdata[tid + shared_mem_offset]; if(sdata[tid + s] <= sdata[tid]) { min_val = sdata[tid + s]; min_label = sdata[shared_mem_offset + tid + s]; } //min val parallel reduction sdata[tid] = min_val; //argmin prallel reduction sdata[shared_mem_offset + tid] = min_label; } __syncthreads(); } float min_h = sdata[threadIdx.x * blockDim.y]; int argmin_h = sdata[shared_mem_offset + threadIdx.x * blockDim.y]; __syncthreads(); msg = min_h + jump(direction, y, x, 0, jump.size4 - 1) * edgeWeight; msg_argmin = static_cast<int>(argmin_h); sdata[tid] = h; __syncthreads(); for(unsigned short label = start; label < stop + 1; ++label) { // compute min in local var to avoid global mem writes float new_msg = sdata[label + blockDim.y * threadIdx.x] + jump(direction, y, x, label, c) * edgeWeight; if(new_msg <= msg) { msg = new_msg; msg_argmin = label; } } __syncthreads(); ///////////////////////////// } // if(x == 2 && y == 0 && direction == DOWN) // { // printf("argmin : %i argmin h %i min_val %f min_h %f h: %f \n", msg_argmin, argmin_h, msg, min_h, h); // } // compute normalization with 2nd reduction sdata[tid] = (float)exp((double)msg); __syncthreads(); for(unsigned int s=blockDim.y / 2; s > 0; s>>=1) { if(tid - (threadIdx.x * blockDim.y) < s && tid + s < (threadIdx.x * blockDim.y) + C) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } // normalize message double sum_exp = max((double)sdata[blockDim.y * threadIdx.x], 1e-45); float logSumExp = (float)log(sum_exp); // if(sum_exp < 1e-10) // { // printf("sum exp zero: %f , logsumexp: %f msg: %f \n", sum_exp, msg); // } //float logSumExp = 0.0; if(direction == RIGHT) { messages(n, LEFT, y, x+1, c) = msg - logSumExp; messages_argmin(n, LEFT, y, x+1, c) = msg_argmin; message_scale(n, LEFT, y, x+1) = sum_exp; } if(direction == LEFT) { messages(n, RIGHT, y, x-1, c) = msg - logSumExp; messages_argmin(n, RIGHT, y, x-1, c) = msg_argmin; message_scale(n, RIGHT, y, x-1) = sum_exp; } if(direction == UP) { messages(n, DOWN, y-1, x, c) = msg - logSumExp; messages_argmin(n, DOWN, y-1, x, c) = msg_argmin; message_scale(n, DOWN, y-1, x) = sum_exp; } if(direction == DOWN) { messages(n, UP, y+1, x, c) = msg - logSumExp; messages_argmin(n, UP, y+1, x, c) = msg_argmin; message_scale(n, UP, y+1, x) = sum_exp; } } __global__ void lbp_cuda_backward_kernel_reduction_min_sum( KernelData cost, KernelData edges, KernelData5 messages, KernelData5 messages_argmin, KernelData message_scale, KernelData5 in_grad, KernelData gradient_unary, KernelData5 gradient_pairwise, KernelData gradient_edge, KernelData gradient_accumulation, KernelData gradient_accumulation_tmp, KernelData5 saved_prev_grad_msg, const unsigned short x_in, const unsigned short direction, bool compute_cross, const unsigned int n) { //initialize utility variables unsigned short y = blockIdx.x * blockDim.x + threadIdx.x; const unsigned short c = blockIdx.y * blockDim.y + threadIdx.y; //unsigned int n = 0; unsigned int x; if(direction == UP || direction == DOWN) { x = y; y = x_in; } 
else { x = x_in; } // shared memory h extern __shared__ float sdata[]; // message size is N x 4 x H x W x C // cost size is N x H x W x C // edges: N x 1 x H x W // jumps: 1 x 1 x H x W const short N = cost.size0; const short H = cost.size1; const short W = cost.size2; const short C = cost.size3; const unsigned int tid = threadIdx.y + blockDim.y * threadIdx.x; const float max_float = 1e15; // check inside image if(c >= C || x >= W || y >= H) { // write large number that will never win sdata[tid] = max_float; return; } //calc backward message short prev_row_shift = 0; short prev_col_shift = 0; if(direction == LEFT) { prev_row_shift = 0; prev_col_shift = 1; } if(direction == RIGHT) { prev_row_shift = 0; prev_col_shift = -1; } if(direction == DOWN) { prev_row_shift = -1; prev_col_shift = 0; } if(direction == UP) { prev_row_shift = 1; prev_col_shift = 0; } int grad_xy_idx = 0; if(direction == UP) { grad_xy_idx = DOWN; } if(direction == DOWN) { grad_xy_idx = UP; } if(direction == LEFT) { grad_xy_idx = RIGHT; } if(direction == RIGHT) { grad_xy_idx = LEFT; } float edgeWeight = edges(n, grad_xy_idx, y, x); int HOR_IDX = 0; int UP_IDX = 1; int DOWN_IDX = 2; ///////////////////////in_grad normalization //////////////////////////////////////////// float original_message_val = messages(n, direction, y + prev_row_shift, x + prev_col_shift, c) + log(message_scale(n, direction, y + prev_row_shift, x + prev_col_shift)); float message_exp_sum = message_scale(n, direction, y + prev_row_shift, x + prev_col_shift); sdata[tid] = in_grad(n, direction, y + prev_row_shift, x + prev_col_shift, c); __syncthreads(); float in_grad_normalized = 0.0; // normalization for(unsigned short label = 0; label < C; ++label) { float J_norm_factor = - (1.0 / message_exp_sum) * exp(original_message_val); if(c == label) { J_norm_factor = 1.0 - (1.0 / message_exp_sum) * exp(original_message_val); } //printf("tid %i label %i norm msg val %f \n", tid, label, norm_msg_val); //in_grad is in sdata in_grad_normalized += sdata[label + blockDim.y * threadIdx.x] * J_norm_factor; } __syncthreads(); ///////////////////////acc normalization //////////////////////////////////////////// sdata[tid] = getGradientAcc(gradient_accumulation, direction, n, y, x, c, HOR_IDX); __syncthreads(); float acc_normalized = 0.0; // normalization for(unsigned short label = 0; label < C; ++label) { float J_norm_factor = - (1.0 / message_exp_sum) * exp(original_message_val); if(c == label) { J_norm_factor = 1.0 - (1.0 / message_exp_sum) * exp(original_message_val); } //in_grad is in sdata acc_normalized += sdata[label + blockDim.y * threadIdx.x] * J_norm_factor; } __syncthreads(); ///////////////////////////////// int min_index = (int)messages_argmin(n, direction, y + prev_row_shift, x + prev_col_shift, c); float additive_hor = in_grad_normalized + acc_normalized; float additive_up = 0.0; float additive_down = 0.0; if(compute_cross) { additive_up = saved_prev_grad_msg(n, UP, y + prev_row_shift, x + prev_col_shift, c) + getGradientAcc(gradient_accumulation, direction, n, y, x, c, UP_IDX); additive_down = saved_prev_grad_msg(n, DOWN, y + prev_row_shift, x + prev_col_shift, c) + getGradientAcc(gradient_accumulation, direction, n, y, x, c, DOWN_IDX); } // so that gradient_acc is not changed before assigning __syncthreads(); //unary gradient atomicAdd(&gradient_unary(n, y, x, min_index), additive_hor); atomicAdd(&gradient_unary(n, y, x, min_index), additive_up); atomicAdd(&gradient_unary(n, y, x, min_index), additive_down); //pairwise gradient 
atomicAdd(&gradient_pairwise(grad_xy_idx, y, x, min_index, c), edgeWeight * additive_hor); atomicAdd(&gradient_pairwise(grad_xy_idx, y, x, min_index, c), edgeWeight * additive_up); atomicAdd(&gradient_pairwise(grad_xy_idx, y, x, min_index, c), edgeWeight * additive_down); //edge gradient // atomicAdd(&gradient_edge(0, grad_xy_idx, y, x), jump(grad_xy_idx, y, x, min_index, c) * additive_hor); // atomicAdd(&gradient_edge(0, grad_xy_idx, y, x), jump(grad_xy_idx, y, x, min_index, c) * additive_up); // atomicAdd(&gradient_edge(0, grad_xy_idx, y, x), jump(grad_xy_idx, y, x, min_index, c) * additive_down); updateGradientAcc(gradient_accumulation_tmp, additive_hor, direction, n, y, x, min_index, HOR_IDX); updateGradientAcc(gradient_accumulation_tmp, additive_up, direction, n, y, x, min_index, UP_IDX); updateGradientAcc(gradient_accumulation_tmp, additive_down, direction, n, y, x, min_index, DOWN_IDX); __syncthreads(); setGradientAcc(gradient_accumulation, getGradientAcc(gradient_accumulation_tmp, direction, n, y, x, c, HOR_IDX), direction, n, y, x, c, HOR_IDX); setGradientAcc(gradient_accumulation, getGradientAcc(gradient_accumulation_tmp, direction, n, y, x, c, UP_IDX), direction, n, y, x, c, UP_IDX); setGradientAcc(gradient_accumulation, getGradientAcc(gradient_accumulation_tmp, direction, n, y, x, c, DOWN_IDX), direction, n, y, x, c, DOWN_IDX); __syncthreads(); saved_prev_grad_msg(n, direction, y, x, c) = getGradientAcc(gradient_accumulation, direction, n, y, x, c, HOR_IDX); } // ============================================================================ // CPP KERNEL CALLS // ============================================================================ namespace cuda { std::vector<at::Tensor> lbp_reduction_min_sum(at::Tensor cost, at::Tensor jump, at::Tensor edge, at::Tensor messages, unsigned short delta) { int N = cost.size(0); int H = cost.size(1); int W = cost.size(2); int C = cost.size(3); //int max_iter = 2; auto options = at::TensorOptions(cost.options()); // at::Tensor messages = at::zeros({N, 4, H, W, C}, options); at::Tensor messages_argmin = at::zeros({N, 4, H, W, C}, options); at::Tensor message_scale = at::zeros({N, 4, H, W}, options); //cost = cost.permute({0, 2, 3, 1}).contiguous(); // parallelize over image rows and disparities // block-size in disparity dimension must be >= number of disparities // then all the synchronization can be done over blocks (fast) // otherwise global synchronization is necessary int blockDimC = static_cast<int>(::min(powf(2.0f, ::ceil(log2f(C))), 1024.0f)); int blockDimHW = static_cast<int>(::max(static_cast<float>(1024.0f / blockDimC / 1.0f), 1.0f)); // attention: 1024 is maximal number of threads per block!! 
const dim3 blockSize(blockDimHW, blockDimC); const dim3 numBlocksLR(::ceil(H / static_cast<float>(blockSize.x)), ::ceil(C / static_cast<float>(blockSize.y))); const dim3 numBlocksUD(::ceil(W / static_cast<float>(blockSize.x)), ::ceil(C / static_cast<float>(blockSize.y))); if(numBlocksLR.y != 1) { std::cout << "SOMETHING IS WRONG: Blocksize over disps is not 1=:" << numBlocksLR.y << "C=" << C << std::endl; } const int threadsPerBlock = blockSize.x * blockSize.y * blockSize.z; // to Right for(unsigned short x = 0; x < W - 1; ++x) { // compute min messages hipLaunchKernelGGL(( lbp_cuda_forward_kernel_reduction_min_sum), dim3(numBlocksLR), dim3(blockSize), 2 * threadsPerBlock * sizeof(float), 0, cost, jump, edge, messages, messages_argmin, message_scale, x, RIGHT, threadsPerBlock, delta); cudaSafeCall(hipGetLastError()); } // to LEFT for(unsigned short x = W - 1; x > 0; --x) { // compute min messages hipLaunchKernelGGL(( lbp_cuda_forward_kernel_reduction_min_sum), dim3(numBlocksLR), dim3(blockSize), 2 * threadsPerBlock * sizeof(float), 0, cost, jump, edge, messages, messages_argmin, message_scale, x, LEFT, threadsPerBlock, delta); cudaSafeCall(hipGetLastError()); } // to DOWN for(unsigned short y = 0; y < H - 1; ++y) { // compute min messages hipLaunchKernelGGL(( lbp_cuda_forward_kernel_reduction_min_sum), dim3(numBlocksUD), dim3(blockSize), 2 * threadsPerBlock * sizeof(float), 0, cost, jump, edge, messages, messages_argmin, message_scale, y, DOWN, threadsPerBlock, delta); cudaSafeCall(hipGetLastError()); } // to UP for(unsigned short y = H - 1; y > 0; --y) { // compute min messages hipLaunchKernelGGL(( lbp_cuda_forward_kernel_reduction_min_sum), dim3(numBlocksUD), dim3(blockSize), 2 * threadsPerBlock * sizeof(float), 0, cost, jump, edge, messages, messages_argmin, message_scale, y, UP, threadsPerBlock, delta); cudaSafeCall(hipGetLastError()); } //auto beliefs = messages.sum({1}) + cost; std::vector<at::Tensor> output_vec; output_vec.push_back(messages); output_vec.push_back(messages_argmin); output_vec.push_back(message_scale); return output_vec; } std::vector<at::Tensor> lbp_forward_min_sum(at::Tensor cost, at::Tensor jump, at::Tensor edge, at::Tensor messages, unsigned short delta) { return lbp_reduction_min_sum(cost, jump, edge, messages, delta); } //============================================================================= // BACKWARD //============================================================================= std::vector<at::Tensor> lbp_backward_min_sum(at::Tensor cost, at::Tensor edge, at::Tensor in_grad, at::Tensor messages, at::Tensor messages_argmin, at::Tensor message_scale) { int N = cost.size(0); int H = cost.size(1); int W = cost.size(2); int C = cost.size(3); auto options = at::TensorOptions(cost.options()); at::Tensor gradient_unary = at::zeros({N, H, W, C}, options); at::Tensor gradient_pairwise = at::zeros({4, H, W, C, C}, options); at::Tensor gradient_edge = at::zeros({N, 4, H, W}, options); at::Tensor gradient_messages = at::zeros({N, 4, H, W, C}, options); gradient_messages += in_grad; at::Tensor saved_prev_grad_msg = at::zeros({N, 4, H, W, C}, options); at::Tensor gradient_accumulation; // parallelize over image rows and disparities // block-size in disparity dimension must be >= number of disparities // then all the synchronization can be done over blocks (fast) // otherwise global synchronization is necessary int blockDimC = static_cast<int>(::min(powf(2.0f, ::ceil(log2f(C))), 1024.0f)); int blockDimHW = static_cast<int>(::max(static_cast<float>(1024.0f / 
blockDimC / 1.0f), 1.0f)); // attention: 1024 is maximal number of threads per block!! const dim3 blockSize(blockDimHW, blockDimC); const dim3 numBlocksLR(::ceil(H / static_cast<float>(blockSize.x)), ::ceil(C / static_cast<float>(blockSize.y))); const dim3 numBlocksUD(::ceil(W / static_cast<float>(blockSize.x)), ::ceil(C / static_cast<float>(blockSize.y))); //printf("blockDimC %i \n", blockDimC); //printf("blockDimHW %i \n", blockDimHW); if(numBlocksLR.y != 1) std::cout << "SOMETHING IS WRONG: Blocksize over disps is not 1: " << numBlocksLR.y << std::endl; const int threadsPerBlock = blockSize.x * blockSize.y * blockSize.z; const float max_float = 1e15; for(int n = 0; n < N; ++n) { ////////////////////UNARY GRADIENT//////////////////////////// //to DOWN gradient_accumulation = at::zeros({N, W, 3, C}, options); for(short y = 1; y < H; ++y) { // compute min messages at::Tensor gradient_accumulation_tmp = at::zeros({N, W, 3, C}, options); hipLaunchKernelGGL(( lbp_cuda_backward_kernel_reduction_min_sum), dim3(numBlocksUD), dim3(blockSize), threadsPerBlock * sizeof(float), 0, cost, edge, messages, messages_argmin, message_scale, in_grad, gradient_unary, gradient_pairwise, gradient_edge, gradient_accumulation, gradient_accumulation_tmp, saved_prev_grad_msg, y, DOWN, false, n); cudaSafeCall(hipGetLastError()); } // to UP gradient_accumulation = at::zeros({N, W, 3, C}, options); for(short y = H - 2; y >= 0; --y) { // compute min messages at::Tensor gradient_accumulation_tmp = at::zeros({N, W, 3, C}, options); hipLaunchKernelGGL(( lbp_cuda_backward_kernel_reduction_min_sum), dim3(numBlocksUD), dim3(blockSize), threadsPerBlock * sizeof(float), 0, cost, edge, messages, messages_argmin, message_scale, in_grad, gradient_unary, gradient_pairwise, gradient_edge, gradient_accumulation, gradient_accumulation_tmp, saved_prev_grad_msg, y, UP, false, n); cudaSafeCall(hipGetLastError()); } // to LEFT gradient_accumulation = at::zeros({N, H, 3, C}, options); for(short x = W-2; x >= 0; --x) { // compute min messages at::Tensor gradient_accumulation_tmp = at::zeros({N, H, 3, C}, options); hipLaunchKernelGGL(( lbp_cuda_backward_kernel_reduction_min_sum), dim3(numBlocksLR), dim3(blockSize), threadsPerBlock * sizeof(float), 0, cost, edge, messages, messages_argmin, message_scale, in_grad, gradient_unary, gradient_pairwise, gradient_edge, gradient_accumulation, gradient_accumulation_tmp, saved_prev_grad_msg, x, LEFT, true, n); cudaSafeCall(hipGetLastError()); } // to RIGHT gradient_accumulation = at::zeros({N, H, 3, C}, options); for(short x = 1; x < W; ++x) { // compute min messages at::Tensor gradient_accumulation_tmp = at::zeros({N, H, 3, C}, options); hipLaunchKernelGGL(( lbp_cuda_backward_kernel_reduction_min_sum), dim3(numBlocksLR), dim3(blockSize), threadsPerBlock * sizeof(float), 0, cost, edge, messages, messages_argmin, message_scale, in_grad, gradient_unary, gradient_pairwise, gradient_edge, gradient_accumulation, gradient_accumulation_tmp, saved_prev_grad_msg, x, RIGHT, true, n); cudaSafeCall(hipGetLastError()); } } std::vector<at::Tensor> output_vec; output_vec.push_back(gradient_unary); output_vec.push_back(gradient_pairwise); output_vec.push_back(gradient_edge); output_vec.push_back(gradient_messages); return output_vec; } }
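The "TRUNC SPEED UP" branch of the forward kernel above carries a (value, label) pair through shared memory so the tree reduction produces the argmin together with the min. A self-contained single-block sketch of that core pattern, with illustrative names and C assumed to be a power of two:

// Single-block min+argmin tree reduction, mirroring the paired
// sdata / sdata+shared_mem_offset reduction in the forward kernel.
// Launch: block_min_argmin<<<1, C, 2 * C * sizeof(float)>>>(vals, d_min, d_arg, C);
__global__ void block_min_argmin(const float *vals, float *min_out,
                                 int *argmin_out, int C)
{
    extern __shared__ float s[];
    float *sv = s;      // candidate values
    float *sl = s + C;  // their labels, stored as float like in the kernel above
    int tid = threadIdx.x;
    sv[tid] = vals[tid];
    sl[tid] = (float)tid;
    __syncthreads();
    for (int stride = C / 2; stride > 0; stride >>= 1) {
        if (tid < stride && sv[tid + stride] <= sv[tid]) {
            sv[tid] = sv[tid + stride];  // keep the smaller value
            sl[tid] = sl[tid + stride];  // and its label (ties prefer the later index)
        }
        __syncthreads();
    }
    if (tid == 0) {
        *min_out = sv[0];
        *argmin_out = (int)sl[0];
    }
}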
a8fc66edd292ca84fa8d27394b7dbd0c7e68ccb1.cu
// This file is part of bp-layers. // // Copyright (C) 2020 Patrick Knöbelreiter <knoebelreiter at icg dot tugraz dot at> // Christian Sormann <christian dot sormann at icg dot tugraz dot at> // Institute for Computer Graphics and Vision, Graz University of Technology // https://www.tugraz.at/institute/icg/teams/team-pock/ // // bp-layers is free software: you can redistribute it and/or modify it under the // terms of the GNU Affero General Public License as published by the Free Software // Foundation, either version 3 of the License, or any later version. // // bp-layers is distributed in the hope that it will be useful, but WITHOUT ANY // WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS // FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. // // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. #include "../../include/error_util.h" #include "lbp_min_sum_kernel.cuh" #include "util.cuh" // ============================================================================ // CUDA KERNELS // ============================================================================ __global__ void lbp_cuda_forward_kernel_reduction_min_sum( KernelData cost, KernelData5 jump, KernelData edges, KernelData5 messages, KernelData5 messages_argmin, KernelData message_scale, const unsigned short x_in, const unsigned short direction, int shared_mem_offset, unsigned short delta) { unsigned short y = blockIdx.x * blockDim.x + threadIdx.x; const unsigned short c = blockIdx.y * blockDim.y + threadIdx.y; unsigned short x = 0; if(direction == UP || direction == DOWN) { x = y; y = x_in; } else { x = x_in; // y = y; } // shared memory h extern __shared__ float sdata[]; // message size is N x 4 x H x W x C // cost size is N x H x W x C // edges: N x 1 x H x W // jumps: 1 x 1 x H x W const short N = cost.size0; const short H = cost.size1; const short W = cost.size2; const short C = cost.size3; const unsigned int tid = threadIdx.y + blockDim.y * threadIdx.x; const float max_float = 1e15; // check inside image if(c >= C || x >= W || y >= H) { // write large number that will never win sdata[tid] = max_float; return; } unsigned int n = 0; float L2 = jump(direction, y, x, 0, jump.size4 - 1) ; unsigned short start = max(c - delta + 1, 0); unsigned short stop = min(c + delta - 1, C - 1); float edgeWeight = edges(n, direction, y, x); // write to shared memory // compute message for every label sdata[tid] = cost(n, y, x, c); // add costs from all neighbors if(direction != RIGHT) { sdata[tid] += messages(n, RIGHT, y, x, c); } if(direction != LEFT) { sdata[tid] += messages(n, LEFT, y, x, c); } if(direction != UP) { sdata[tid] += messages(n, UP, y, x, c); } if(direction != DOWN) { sdata[tid] += messages(n, DOWN, y, x, c); } float h = sdata[tid]; __syncthreads(); // save h in shared mem sdata[tid] = h; sdata[tid + shared_mem_offset] = static_cast<float>(c); __syncthreads(); // if delta is larger or equal than this threshold use old version as it is a little faster int old_version_threshold = C; float msg = 0.0; int msg_argmin = 0; // if there is no truncation use old version if(delta >= old_version_threshold) { //OLD VERSION ///////////////////// sdata[tid] = h; __syncthreads(); msg = max_float; //minVal + jump(0, 0, 0, jump.size3 - 1) * edgeWeight; msg_argmin = 0; for(unsigned short label = 0; label < C; ++label) { // compute min in local var to avoid global mem writes float new_msg = 
sdata[label + blockDim.y * threadIdx.x] + jump(direction, y, x, label, c) * edgeWeight; msg = fminf(msg, new_msg); if(msg == new_msg) { msg_argmin = label; } } __syncthreads(); ///////////////// } else { //TRUNC SPEED UP VERSION /////////////////////////////////// for(unsigned int s=blockDim.y / 2; s > 0; s>>=1) { if(tid - (threadIdx.x * blockDim.y) < s && tid + s < (threadIdx.x * blockDim.y) + C) { //min parallel reduction float min_val = sdata[tid]; float min_label = sdata[tid + shared_mem_offset]; if(sdata[tid + s] <= sdata[tid]) { min_val = sdata[tid + s]; min_label = sdata[shared_mem_offset + tid + s]; } //min val parallel reduction sdata[tid] = min_val; //argmin prallel reduction sdata[shared_mem_offset + tid] = min_label; } __syncthreads(); } float min_h = sdata[threadIdx.x * blockDim.y]; int argmin_h = sdata[shared_mem_offset + threadIdx.x * blockDim.y]; __syncthreads(); msg = min_h + jump(direction, y, x, 0, jump.size4 - 1) * edgeWeight; msg_argmin = static_cast<int>(argmin_h); sdata[tid] = h; __syncthreads(); for(unsigned short label = start; label < stop + 1; ++label) { // compute min in local var to avoid global mem writes float new_msg = sdata[label + blockDim.y * threadIdx.x] + jump(direction, y, x, label, c) * edgeWeight; if(new_msg <= msg) { msg = new_msg; msg_argmin = label; } } __syncthreads(); ///////////////////////////// } // if(x == 2 && y == 0 && direction == DOWN) // { // printf("argmin : %i argmin h %i min_val %f min_h %f h: %f \n", msg_argmin, argmin_h, msg, min_h, h); // } // compute normalization with 2nd reduction sdata[tid] = (float)exp((double)msg); __syncthreads(); for(unsigned int s=blockDim.y / 2; s > 0; s>>=1) { if(tid - (threadIdx.x * blockDim.y) < s && tid + s < (threadIdx.x * blockDim.y) + C) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } // normalize message double sum_exp = max((double)sdata[blockDim.y * threadIdx.x], 1e-45); float logSumExp = (float)log(sum_exp); // if(sum_exp < 1e-10) // { // printf("sum exp zero: %f , logsumexp: %f msg: %f \n", sum_exp, msg); // } //float logSumExp = 0.0; if(direction == RIGHT) { messages(n, LEFT, y, x+1, c) = msg - logSumExp; messages_argmin(n, LEFT, y, x+1, c) = msg_argmin; message_scale(n, LEFT, y, x+1) = sum_exp; } if(direction == LEFT) { messages(n, RIGHT, y, x-1, c) = msg - logSumExp; messages_argmin(n, RIGHT, y, x-1, c) = msg_argmin; message_scale(n, RIGHT, y, x-1) = sum_exp; } if(direction == UP) { messages(n, DOWN, y-1, x, c) = msg - logSumExp; messages_argmin(n, DOWN, y-1, x, c) = msg_argmin; message_scale(n, DOWN, y-1, x) = sum_exp; } if(direction == DOWN) { messages(n, UP, y+1, x, c) = msg - logSumExp; messages_argmin(n, UP, y+1, x, c) = msg_argmin; message_scale(n, UP, y+1, x) = sum_exp; } } __global__ void lbp_cuda_backward_kernel_reduction_min_sum( KernelData cost, KernelData edges, KernelData5 messages, KernelData5 messages_argmin, KernelData message_scale, KernelData5 in_grad, KernelData gradient_unary, KernelData5 gradient_pairwise, KernelData gradient_edge, KernelData gradient_accumulation, KernelData gradient_accumulation_tmp, KernelData5 saved_prev_grad_msg, const unsigned short x_in, const unsigned short direction, bool compute_cross, const unsigned int n) { //initialize utility variables unsigned short y = blockIdx.x * blockDim.x + threadIdx.x; const unsigned short c = blockIdx.y * blockDim.y + threadIdx.y; //unsigned int n = 0; unsigned int x; if(direction == UP || direction == DOWN) { x = y; y = x_in; } else { x = x_in; } // shared memory h extern __shared__ float sdata[]; // message 
size is N x 4 x H x W x C // cost size is N x H x W x C // edges: N x 1 x H x W // jumps: 1 x 1 x H x W const short N = cost.size0; const short H = cost.size1; const short W = cost.size2; const short C = cost.size3; const unsigned int tid = threadIdx.y + blockDim.y * threadIdx.x; const float max_float = 1e15; // check inside image if(c >= C || x >= W || y >= H) { // write large number that will never win sdata[tid] = max_float; return; } //calc backward message short prev_row_shift = 0; short prev_col_shift = 0; if(direction == LEFT) { prev_row_shift = 0; prev_col_shift = 1; } if(direction == RIGHT) { prev_row_shift = 0; prev_col_shift = -1; } if(direction == DOWN) { prev_row_shift = -1; prev_col_shift = 0; } if(direction == UP) { prev_row_shift = 1; prev_col_shift = 0; } int grad_xy_idx = 0; if(direction == UP) { grad_xy_idx = DOWN; } if(direction == DOWN) { grad_xy_idx = UP; } if(direction == LEFT) { grad_xy_idx = RIGHT; } if(direction == RIGHT) { grad_xy_idx = LEFT; } float edgeWeight = edges(n, grad_xy_idx, y, x); int HOR_IDX = 0; int UP_IDX = 1; int DOWN_IDX = 2; ///////////////////////in_grad normalization //////////////////////////////////////////// float original_message_val = messages(n, direction, y + prev_row_shift, x + prev_col_shift, c) + log(message_scale(n, direction, y + prev_row_shift, x + prev_col_shift)); float message_exp_sum = message_scale(n, direction, y + prev_row_shift, x + prev_col_shift); sdata[tid] = in_grad(n, direction, y + prev_row_shift, x + prev_col_shift, c); __syncthreads(); float in_grad_normalized = 0.0; // normalization for(unsigned short label = 0; label < C; ++label) { float J_norm_factor = - (1.0 / message_exp_sum) * exp(original_message_val); if(c == label) { J_norm_factor = 1.0 - (1.0 / message_exp_sum) * exp(original_message_val); } //printf("tid %i label %i norm msg val %f \n", tid, label, norm_msg_val); //in_grad is in sdata in_grad_normalized += sdata[label + blockDim.y * threadIdx.x] * J_norm_factor; } __syncthreads(); ///////////////////////acc normalization //////////////////////////////////////////// sdata[tid] = getGradientAcc(gradient_accumulation, direction, n, y, x, c, HOR_IDX); __syncthreads(); float acc_normalized = 0.0; // normalization for(unsigned short label = 0; label < C; ++label) { float J_norm_factor = - (1.0 / message_exp_sum) * exp(original_message_val); if(c == label) { J_norm_factor = 1.0 - (1.0 / message_exp_sum) * exp(original_message_val); } //in_grad is in sdata acc_normalized += sdata[label + blockDim.y * threadIdx.x] * J_norm_factor; } __syncthreads(); ///////////////////////////////// int min_index = (int)messages_argmin(n, direction, y + prev_row_shift, x + prev_col_shift, c); float additive_hor = in_grad_normalized + acc_normalized; float additive_up = 0.0; float additive_down = 0.0; if(compute_cross) { additive_up = saved_prev_grad_msg(n, UP, y + prev_row_shift, x + prev_col_shift, c) + getGradientAcc(gradient_accumulation, direction, n, y, x, c, UP_IDX); additive_down = saved_prev_grad_msg(n, DOWN, y + prev_row_shift, x + prev_col_shift, c) + getGradientAcc(gradient_accumulation, direction, n, y, x, c, DOWN_IDX); } // so that gradient_acc is not changed before assigning __syncthreads(); //unary gradient atomicAdd(&gradient_unary(n, y, x, min_index), additive_hor); atomicAdd(&gradient_unary(n, y, x, min_index), additive_up); atomicAdd(&gradient_unary(n, y, x, min_index), additive_down); //pairwise gradient atomicAdd(&gradient_pairwise(grad_xy_idx, y, x, min_index, c), edgeWeight * additive_hor); 
atomicAdd(&gradient_pairwise(grad_xy_idx, y, x, min_index, c), edgeWeight * additive_up); atomicAdd(&gradient_pairwise(grad_xy_idx, y, x, min_index, c), edgeWeight * additive_down); //edge gradient // atomicAdd(&gradient_edge(0, grad_xy_idx, y, x), jump(grad_xy_idx, y, x, min_index, c) * additive_hor); // atomicAdd(&gradient_edge(0, grad_xy_idx, y, x), jump(grad_xy_idx, y, x, min_index, c) * additive_up); // atomicAdd(&gradient_edge(0, grad_xy_idx, y, x), jump(grad_xy_idx, y, x, min_index, c) * additive_down); updateGradientAcc(gradient_accumulation_tmp, additive_hor, direction, n, y, x, min_index, HOR_IDX); updateGradientAcc(gradient_accumulation_tmp, additive_up, direction, n, y, x, min_index, UP_IDX); updateGradientAcc(gradient_accumulation_tmp, additive_down, direction, n, y, x, min_index, DOWN_IDX); __syncthreads(); setGradientAcc(gradient_accumulation, getGradientAcc(gradient_accumulation_tmp, direction, n, y, x, c, HOR_IDX), direction, n, y, x, c, HOR_IDX); setGradientAcc(gradient_accumulation, getGradientAcc(gradient_accumulation_tmp, direction, n, y, x, c, UP_IDX), direction, n, y, x, c, UP_IDX); setGradientAcc(gradient_accumulation, getGradientAcc(gradient_accumulation_tmp, direction, n, y, x, c, DOWN_IDX), direction, n, y, x, c, DOWN_IDX); __syncthreads(); saved_prev_grad_msg(n, direction, y, x, c) = getGradientAcc(gradient_accumulation, direction, n, y, x, c, HOR_IDX); } // ============================================================================ // CPP KERNEL CALLS // ============================================================================ namespace cuda { std::vector<at::Tensor> lbp_reduction_min_sum(at::Tensor cost, at::Tensor jump, at::Tensor edge, at::Tensor messages, unsigned short delta) { int N = cost.size(0); int H = cost.size(1); int W = cost.size(2); int C = cost.size(3); //int max_iter = 2; auto options = at::TensorOptions(cost.options()); // at::Tensor messages = at::zeros({N, 4, H, W, C}, options); at::Tensor messages_argmin = at::zeros({N, 4, H, W, C}, options); at::Tensor message_scale = at::zeros({N, 4, H, W}, options); //cost = cost.permute({0, 2, 3, 1}).contiguous(); // parallelize over image rows and disparities // block-size in disparity dimension must be >= number of disparities // then all the synchronization can be done over blocks (fast) // otherwise global synchronization is necessary int blockDimC = static_cast<int>(std::min(powf(2.0f, std::ceil(log2f(C))), 1024.0f)); int blockDimHW = static_cast<int>(std::max(static_cast<float>(1024.0f / blockDimC / 1.0f), 1.0f)); // attention: 1024 is maximal number of threads per block!! 
const dim3 blockSize(blockDimHW, blockDimC);
    const dim3 numBlocksLR(std::ceil(H / static_cast<float>(blockSize.x)), std::ceil(C / static_cast<float>(blockSize.y)));
    const dim3 numBlocksUD(std::ceil(W / static_cast<float>(blockSize.x)), std::ceil(C / static_cast<float>(blockSize.y)));

    if(numBlocksLR.y != 1)
    {
        std::cout << "SOMETHING IS WRONG: Blocksize over disps is not 1: " << numBlocksLR.y << " C=" << C << std::endl;
    }

    const int threadsPerBlock = blockSize.x * blockSize.y * blockSize.z;

    // to RIGHT
    for(unsigned short x = 0; x < W - 1; ++x)
    {
        // compute min messages
        lbp_cuda_forward_kernel_reduction_min_sum<<<numBlocksLR, blockSize, 2 * threadsPerBlock * sizeof(float)>>>(cost, jump, edge, messages, messages_argmin, message_scale, x, RIGHT, threadsPerBlock, delta);
        cudaSafeCall(cudaGetLastError());
    }

    // to LEFT
    for(unsigned short x = W - 1; x > 0; --x)
    {
        // compute min messages
        lbp_cuda_forward_kernel_reduction_min_sum<<<numBlocksLR, blockSize, 2 * threadsPerBlock * sizeof(float)>>>(cost, jump, edge, messages, messages_argmin, message_scale, x, LEFT, threadsPerBlock, delta);
        cudaSafeCall(cudaGetLastError());
    }

    // to DOWN
    for(unsigned short y = 0; y < H - 1; ++y)
    {
        // compute min messages
        lbp_cuda_forward_kernel_reduction_min_sum<<<numBlocksUD, blockSize, 2 * threadsPerBlock * sizeof(float)>>>(cost, jump, edge, messages, messages_argmin, message_scale, y, DOWN, threadsPerBlock, delta);
        cudaSafeCall(cudaGetLastError());
    }

    // to UP
    for(unsigned short y = H - 1; y > 0; --y)
    {
        // compute min messages
        lbp_cuda_forward_kernel_reduction_min_sum<<<numBlocksUD, blockSize, 2 * threadsPerBlock * sizeof(float)>>>(cost, jump, edge, messages, messages_argmin, message_scale, y, UP, threadsPerBlock, delta);
        cudaSafeCall(cudaGetLastError());
    }

    //auto beliefs = messages.sum({1}) + cost;

    std::vector<at::Tensor> output_vec;
    output_vec.push_back(messages);
    output_vec.push_back(messages_argmin);
    output_vec.push_back(message_scale);

    return output_vec;
}

std::vector<at::Tensor> lbp_forward_min_sum(at::Tensor cost, at::Tensor jump, at::Tensor edge, at::Tensor messages, unsigned short delta)
{
    return lbp_reduction_min_sum(cost, jump, edge, messages, delta);
}

//=============================================================================
// BACKWARD
//=============================================================================
std::vector<at::Tensor> lbp_backward_min_sum(at::Tensor cost, at::Tensor edge, at::Tensor in_grad, at::Tensor messages, at::Tensor messages_argmin, at::Tensor message_scale)
{
    int N = cost.size(0);
    int H = cost.size(1);
    int W = cost.size(2);
    int C = cost.size(3);

    auto options = at::TensorOptions(cost.options());

    at::Tensor gradient_unary = at::zeros({N, H, W, C}, options);
    at::Tensor gradient_pairwise = at::zeros({4, H, W, C, C}, options);
    at::Tensor gradient_edge = at::zeros({N, 4, H, W}, options);

    at::Tensor gradient_messages = at::zeros({N, 4, H, W, C}, options);
    gradient_messages += in_grad;

    at::Tensor saved_prev_grad_msg = at::zeros({N, 4, H, W, C}, options);

    at::Tensor gradient_accumulation;

    // parallelize over image rows and disparities
    // block-size in disparity dimension must be >= number of disparities
    // then all the synchronization can be done over blocks (fast)
    // otherwise global synchronization is necessary
    int blockDimC = static_cast<int>(std::min(powf(2.0f, std::ceil(log2f(C))), 1024.0f));
    int blockDimHW = static_cast<int>(std::max(static_cast<float>(1024.0f / blockDimC / 1.0f), 1.0f));

    // attention: 1024 is maximal number of threads per block!!
const dim3 blockSize(blockDimHW, blockDimC); const dim3 numBlocksLR(std::ceil(H / static_cast<float>(blockSize.x)), std::ceil(C / static_cast<float>(blockSize.y))); const dim3 numBlocksUD(std::ceil(W / static_cast<float>(blockSize.x)), std::ceil(C / static_cast<float>(blockSize.y))); //printf("blockDimC %i \n", blockDimC); //printf("blockDimHW %i \n", blockDimHW); if(numBlocksLR.y != 1) std::cout << "SOMETHING IS WRONG: Blocksize over disps is not 1: " << numBlocksLR.y << std::endl; const int threadsPerBlock = blockSize.x * blockSize.y * blockSize.z; const float max_float = 1e15; for(int n = 0; n < N; ++n) { ////////////////////UNARY GRADIENT//////////////////////////// //to DOWN gradient_accumulation = at::zeros({N, W, 3, C}, options); for(short y = 1; y < H; ++y) { // compute min messages at::Tensor gradient_accumulation_tmp = at::zeros({N, W, 3, C}, options); lbp_cuda_backward_kernel_reduction_min_sum<<<numBlocksUD, blockSize, threadsPerBlock * sizeof(float)>>>(cost, edge, messages, messages_argmin, message_scale, in_grad, gradient_unary, gradient_pairwise, gradient_edge, gradient_accumulation, gradient_accumulation_tmp, saved_prev_grad_msg, y, DOWN, false, n); cudaSafeCall(cudaGetLastError()); } // to UP gradient_accumulation = at::zeros({N, W, 3, C}, options); for(short y = H - 2; y >= 0; --y) { // compute min messages at::Tensor gradient_accumulation_tmp = at::zeros({N, W, 3, C}, options); lbp_cuda_backward_kernel_reduction_min_sum<<<numBlocksUD, blockSize, threadsPerBlock * sizeof(float)>>>(cost, edge, messages, messages_argmin, message_scale, in_grad, gradient_unary, gradient_pairwise, gradient_edge, gradient_accumulation, gradient_accumulation_tmp, saved_prev_grad_msg, y, UP, false, n); cudaSafeCall(cudaGetLastError()); } // to LEFT gradient_accumulation = at::zeros({N, H, 3, C}, options); for(short x = W-2; x >= 0; --x) { // compute min messages at::Tensor gradient_accumulation_tmp = at::zeros({N, W, 3, C}, options); lbp_cuda_backward_kernel_reduction_min_sum<<<numBlocksLR, blockSize, threadsPerBlock * sizeof(float)>>>(cost, edge, messages, messages_argmin, message_scale, in_grad, gradient_unary, gradient_pairwise, gradient_edge, gradient_accumulation, gradient_accumulation_tmp, saved_prev_grad_msg, x, LEFT, true, n); cudaSafeCall(cudaGetLastError()); } // to RIGHT gradient_accumulation = at::zeros({N, H, 3, C}, options); for(short x = 1; x < W; ++x) { // compute min messages at::Tensor gradient_accumulation_tmp = at::zeros({N, W, 3, C}, options); lbp_cuda_backward_kernel_reduction_min_sum<<<numBlocksLR, blockSize, threadsPerBlock * sizeof(float)>>>(cost, edge, messages, messages_argmin, message_scale, in_grad, gradient_unary, gradient_pairwise, gradient_edge, gradient_accumulation, gradient_accumulation_tmp, saved_prev_grad_msg, x, RIGHT, true, n); cudaSafeCall(cudaGetLastError()); } } std::vector<at::Tensor> output_vec; output_vec.push_back(gradient_unary); output_vec.push_back(gradient_pairwise); output_vec.push_back(gradient_edge); output_vec.push_back(gradient_messages); return output_vec; } }
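// ---------------------------------------------------------------------------
// Hedged sketch (not part of the file above): a minimal, self-contained
// reproduction of the min/argmin shared-memory tree reduction that the
// forward kernel's "TRUNC SPEED UP" branch runs once per pixel. The names
// NUM_LABELS and min_argmin_kernel are illustrative assumptions; the real
// kernel additionally interleaves several pixels per block via threadIdx.x
// and breaks ties differently.
// ---------------------------------------------------------------------------
#include <cstdio>

#define NUM_LABELS 128   // assumed: a power of two, like blockDim.y above

__global__ void min_argmin_kernel(const float* costs, float* min_out, int* argmin_out)
{
    __shared__ float val[NUM_LABELS];
    __shared__ int   arg[NUM_LABELS];

    int t = threadIdx.x;
    val[t] = costs[t];
    arg[t] = t;
    __syncthreads();

    // halve the active range each step, carrying the argmin with the min
    for (int s = NUM_LABELS / 2; s > 0; s >>= 1) {
        if (t < s && val[t + s] < val[t]) {
            val[t] = val[t + s];
            arg[t] = arg[t + s];
        }
        __syncthreads();
    }

    if (t == 0) {
        *min_out    = val[0];
        *argmin_out = arg[0];
    }
}

int main()
{
    float h_costs[NUM_LABELS];
    for (int i = 0; i < NUM_LABELS; ++i) h_costs[i] = (float)((i * 37) % NUM_LABELS);

    float *d_costs, *d_min;
    int *d_arg;
    cudaMalloc(&d_costs, sizeof(h_costs));
    cudaMalloc(&d_min, sizeof(float));
    cudaMalloc(&d_arg, sizeof(int));
    cudaMemcpy(d_costs, h_costs, sizeof(h_costs), cudaMemcpyHostToDevice);

    min_argmin_kernel<<<1, NUM_LABELS>>>(d_costs, d_min, d_arg);

    float h_min; int h_arg;
    cudaMemcpy(&h_min, d_min, sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(&h_arg, d_arg, sizeof(int), cudaMemcpyDeviceToHost);
    printf("min %f at label %d\n", h_min, h_arg);

    cudaFree(d_costs); cudaFree(d_min); cudaFree(d_arg);
    return 0;
}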
41a9ef2d56120149ead418b95d768e58ebff77c9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>   // malloc/free and EXIT_SUCCESS
#include <time.h>
#include <math.h>

const long N = pow(2,30);
const int blocksize = 16;

__global__
void GPU_multi(long *a, long *b)
{
    a[threadIdx.x] *= b[threadIdx.x];
}

void CPU_multi(long *a, long *b)
{
    for(long i = 0; i<N; i++){
        a[i] *= b[i];
    }
}

int main()
{
    const long isize = N*sizeof(long);

    long *a = (long*) malloc(N*sizeof(long));
    long *b = (long*) malloc(N*sizeof(long));

    long *ad;
    long *bd;

    double cdiff = 0.0;
    double gdiff = 0.0;
    double gmdiff = 0.0;
    clock_t mstart, mstop, start, stop;

    for(long i = 0; i < N; i++){
        a[i] = i;
        b[i] = i;
        // printf("%d ",a[i]);
    }
    //printf("\n");

    mstart = clock();
    hipMalloc( (void**)&ad, isize );
    hipMalloc( (void**)&bd, isize );
    hipMemcpy( ad, a, isize, hipMemcpyHostToDevice );
    hipMemcpy( bd, b, isize, hipMemcpyHostToDevice );

    dim3 dimBlock( blocksize, 1 );
    dim3 dimGrid( 1, 1 );

    start = clock();
    hipLaunchKernelGGL(( GPU_multi), dim3(dimGrid), dim3(dimBlock), 0, 0, ad, bd);
    stop = clock();

    hipMemcpy( a, ad, isize, hipMemcpyDeviceToHost );
    hipFree( ad );
    hipFree( bd );
    mstop = clock();

    // for(int i = 0; i < N; i++){
    //     printf("%d ",a[i]);
    // }
    // printf("\n");

    gdiff = (double) (stop - start)/CLOCKS_PER_SEC;
    gmdiff = (double) (mstop - mstart)/CLOCKS_PER_SEC ;

    for(long i = 0; i < N; i++){
        a[i] = i;
        b[i] = i;
        // printf("%d ",a[i]);
    }
    // printf("\n");

    start = clock();
    CPU_multi(a,b);
    //for(int i = 0; i < N; i++){
    //    printf("%d ",a[i]);
    //}
    //printf("\n");
    stop = clock();

    cdiff = (double) (stop - start)/CLOCKS_PER_SEC;

    printf("Completed GPU multiplication of %ld in %.8f seconds\n", N, gdiff);
    printf("Completed CPU multiplication of %ld in %.8f seconds\n", N, cdiff);
    printf("GPU Memory moving time done in %.8f seconds\n", gmdiff);

    free(a);
    free(b);

    return EXIT_SUCCESS;
}
41a9ef2d56120149ead418b95d768e58ebff77c9.cu
#include <stdio.h>
#include <stdlib.h>   // malloc/free and EXIT_SUCCESS
#include <time.h>
#include <math.h>

const long N = pow(2,30);
const int blocksize = 16;

__global__
void GPU_multi(long *a, long *b)
{
    a[threadIdx.x] *= b[threadIdx.x];
}

void CPU_multi(long *a, long *b)
{
    for(long i = 0; i<N; i++){
        a[i] *= b[i];
    }
}

int main()
{
    const long isize = N*sizeof(long);

    long *a = (long*) malloc(N*sizeof(long));
    long *b = (long*) malloc(N*sizeof(long));

    long *ad;
    long *bd;

    double cdiff = 0.0;
    double gdiff = 0.0;
    double gmdiff = 0.0;
    clock_t mstart, mstop, start, stop;

    for(long i = 0; i < N; i++){
        a[i] = i;
        b[i] = i;
        // printf("%d ",a[i]);
    }
    //printf("\n");

    mstart = clock();
    cudaMalloc( (void**)&ad, isize );
    cudaMalloc( (void**)&bd, isize );
    cudaMemcpy( ad, a, isize, cudaMemcpyHostToDevice );
    cudaMemcpy( bd, b, isize, cudaMemcpyHostToDevice );

    dim3 dimBlock( blocksize, 1 );
    dim3 dimGrid( 1, 1 );

    start = clock();
    GPU_multi<<<dimGrid, dimBlock>>>(ad, bd);
    stop = clock();

    cudaMemcpy( a, ad, isize, cudaMemcpyDeviceToHost );
    cudaFree( ad );
    cudaFree( bd );
    mstop = clock();

    // for(int i = 0; i < N; i++){
    //     printf("%d ",a[i]);
    // }
    // printf("\n");

    gdiff = (double) (stop - start)/CLOCKS_PER_SEC;
    gmdiff = (double) (mstop - mstart)/CLOCKS_PER_SEC ;

    for(long i = 0; i < N; i++){
        a[i] = i;
        b[i] = i;
        // printf("%d ",a[i]);
    }
    // printf("\n");

    start = clock();
    CPU_multi(a,b);
    //for(int i = 0; i < N; i++){
    //    printf("%d ",a[i]);
    //}
    //printf("\n");
    stop = clock();

    cdiff = (double) (stop - start)/CLOCKS_PER_SEC;

    printf("Completed GPU multiplication of %ld in %.8f seconds\n", N, gdiff);
    printf("Completed CPU multiplication of %ld in %.8f seconds\n", N, cdiff);
    printf("GPU Memory moving time done in %.8f seconds\n", gmdiff);

    free(a);
    free(b);

    return EXIT_SUCCESS;
}
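// ---------------------------------------------------------------------------
// Hedged sketch (not part of the pair above): GPU_multi is launched with a
// single block of `blocksize` threads, so only the first 16 of the N elements
// are multiplied on the device while the CPU path multiplies all N -- the two
// timings therefore do not compare equal work. A grid-stride variant that
// covers all N elements could look like this; the kernel name and the launch
// shape are illustrative assumptions.
// ---------------------------------------------------------------------------
__global__ void GPU_multi_all(long *a, const long *b, long n)
{
    for (long i = (long)blockIdx.x * blockDim.x + threadIdx.x;
         i < n;
         i += (long)blockDim.x * gridDim.x)
        a[i] *= b[i];
}
// possible launch: GPU_multi_all<<<1024, 256>>>(ad, bd, N);
// (the grid-stride loop makes any grid size correct; more blocks only add
// parallelism)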
4b58782f4e21f46a971849dc44c6a254bb451bf6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ // This file is auto-generated. See "generate_kernels.py" #include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h> using namespace PyTorchMemEffAttention; __global__ void __launch_bounds__( AttentionBackwardKernel<cutlass::arch::Sm50, float, true, true, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, true, true, false, 64, 64, 64>::kMinBlocksPerSm) fmha_cutlassB_f32_aligned_64x64_k64_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, true, true, false, 64, 64, 64>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 500 #if __CUDA_ARCH__ < 700 if (!p.advance_to_block()) { return; } AttentionBackwardKernel<cutlass::arch::Sm50, float, true, true, false, 64, 64, 64>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k64_dropout_sm50` is for sm50-sm70, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionBackwardKernel<cutlass::arch::Sm70, float, true, true, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, true, true, false, 64, 64, 64>::kMinBlocksPerSm) fmha_cutlassB_f32_aligned_64x64_k64_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, true, true, false, 64, 64, 64>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 700 #if __CUDA_ARCH__ < 750 if (!p.advance_to_block()) { return; } AttentionBackwardKernel<cutlass::arch::Sm70, float, true, true, false, 64, 64, 64>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k64_dropout_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionBackwardKernel<cutlass::arch::Sm75, float, true, true, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, true, true, false, 64, 64, 64>::kMinBlocksPerSm) fmha_cutlassB_f32_aligned_64x64_k64_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, true, true, false, 64, 64, 64>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 750 #if __CUDA_ARCH__ < 800 if (!p.advance_to_block()) { return; } AttentionBackwardKernel<cutlass::arch::Sm75, float, true, true, false, 64, 64, 64>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k64_dropout_sm75` is for sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionBackwardKernel<cutlass::arch::Sm80, float, true, true, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm80, float, true, true, false, 64, 64, 64>::kMinBlocksPerSm) fmha_cutlassB_f32_aligned_64x64_k64_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, float, true, true, false, 64, 64, 64>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 800 #if __CUDA_ARCH__ < 1000 if (!p.advance_to_block()) { return; } AttentionBackwardKernel<cutlass::arch::Sm80, float, true, true, false, 64, 64, 64>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k64_dropout_sm80` is for sm80-sm100, but was built for 
sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif }
4b58782f4e21f46a971849dc44c6a254bb451bf6.cu
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ // This file is auto-generated. See "generate_kernels.py" #include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h> using namespace PyTorchMemEffAttention; __global__ void __launch_bounds__( AttentionBackwardKernel<cutlass::arch::Sm50, float, true, true, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, true, true, false, 64, 64, 64>::kMinBlocksPerSm) fmha_cutlassB_f32_aligned_64x64_k64_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, true, true, false, 64, 64, 64>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 500 #if __CUDA_ARCH__ < 700 if (!p.advance_to_block()) { return; } AttentionBackwardKernel<cutlass::arch::Sm50, float, true, true, false, 64, 64, 64>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k64_dropout_sm50` is for sm50-sm70, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionBackwardKernel<cutlass::arch::Sm70, float, true, true, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, true, true, false, 64, 64, 64>::kMinBlocksPerSm) fmha_cutlassB_f32_aligned_64x64_k64_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, true, true, false, 64, 64, 64>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 700 #if __CUDA_ARCH__ < 750 if (!p.advance_to_block()) { return; } AttentionBackwardKernel<cutlass::arch::Sm70, float, true, true, false, 64, 64, 64>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k64_dropout_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionBackwardKernel<cutlass::arch::Sm75, float, true, true, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, true, true, false, 64, 64, 64>::kMinBlocksPerSm) fmha_cutlassB_f32_aligned_64x64_k64_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, true, true, false, 64, 64, 64>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 750 #if __CUDA_ARCH__ < 800 if (!p.advance_to_block()) { return; } AttentionBackwardKernel<cutlass::arch::Sm75, float, true, true, false, 64, 64, 64>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k64_dropout_sm75` is for sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionBackwardKernel<cutlass::arch::Sm80, float, true, true, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm80, float, true, true, false, 64, 64, 64>::kMinBlocksPerSm) fmha_cutlassB_f32_aligned_64x64_k64_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, float, true, true, false, 64, 64, 64>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 800 #if __CUDA_ARCH__ < 1000 if (!p.advance_to_block()) { return; } AttentionBackwardKernel<cutlass::arch::Sm80, float, true, true, false, 64, 64, 64>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k64_dropout_sm80` is for sm80-sm100, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif }
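// ---------------------------------------------------------------------------
// Hedged sketch (not part of the generated files above): each kernel in that
// pair uses an "architecture window" -- the body is compiled only into the
// sm-range its instantiation targets, and every other compilation pass keeps
// just the runtime error printf. A stripped-down illustration; the kernel
// name and the sm range are placeholders.
// ---------------------------------------------------------------------------
#include <cstdio>

__global__ void arch_window_demo()
{
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 800
    // the real work for sm70..sm80 builds goes here
    return;
#endif
#endif
    printf(
        "FATAL: this kernel targets sm70-sm80, but was built for sm%d\n",
        int(__CUDA_ARCH__ + 0) / 10);
#endif
}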
c9f794195f9298f79bf0cf4b6db52f54e6e86338.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2017 XGBoost contributors */ #include <thrust/device_vector.h> #include <xgboost/base.h> #include "../helpers.h" #include "gtest/gtest.h" #include "../../../src/data/sparse_page_source.h" #include "../../../src/gbm/gbtree_model.h" #include "../../../src/tree/updater_gpu_hist.cu" #include "../../../src/common/common.h" namespace xgboost { namespace tree { TEST(gpu_hist_experimental, TestSparseShard) { int rows = 100; int columns = 80; int max_bins = 4; auto dmat = CreateDMatrix(rows, columns, 0.9f); common::GHistIndexMatrix gmat; gmat.Init((*dmat).get(),max_bins); TrainParam p; p.max_depth = 6; dmlc::DataIter<SparsePage>* iter = (*dmat)->RowIterator(); iter->BeforeFirst(); CHECK(iter->Next()); const SparsePage& batch = iter->Value(); DeviceShard shard(0, 0, 0, rows, p); shard.InitRowPtrs(batch); shard.InitCompressedData(gmat.cut, batch); CHECK(!iter->Next()); ASSERT_LT(shard.row_stride, columns); auto host_gidx_buffer = shard.gidx_buffer.AsVector(); common::CompressedIterator<uint32_t> gidx(host_gidx_buffer.data(), gmat.cut.row_ptr.back() + 1); for (int i = 0; i < rows; i++) { int row_offset = 0; for (auto j = gmat.row_ptr[i]; j < gmat.row_ptr[i + 1]; j++) { ASSERT_EQ(gidx[i * shard.row_stride + row_offset], gmat.index[j]); row_offset++; } for (; row_offset < shard.row_stride; row_offset++) { ASSERT_EQ(gidx[i * shard.row_stride + row_offset], shard.null_gidx_value); } } delete dmat; } TEST(gpu_hist_experimental, TestDenseShard) { int rows = 100; int columns = 80; int max_bins = 4; auto dmat = CreateDMatrix(rows, columns, 0); common::GHistIndexMatrix gmat; gmat.Init((*dmat).get(),max_bins); TrainParam p; p.max_depth = 6; dmlc::DataIter<SparsePage>* iter = (*dmat)->RowIterator(); iter->BeforeFirst(); CHECK(iter->Next()); const SparsePage& batch = iter->Value(); DeviceShard shard(0, 0, 0, rows, p); shard.InitRowPtrs(batch); shard.InitCompressedData(gmat.cut, batch); CHECK(!iter->Next()); ASSERT_EQ(shard.row_stride, columns); auto host_gidx_buffer = shard.gidx_buffer.AsVector(); common::CompressedIterator<uint32_t> gidx(host_gidx_buffer.data(), gmat.cut.row_ptr.back() + 1); for (int i = 0; i < gmat.index.size(); i++) { ASSERT_EQ(gidx[i], gmat.index[i]); } delete dmat; } TEST(gpu_hist_experimental, MGPU_mock) { // Attempt to choose multiple GPU devices int ngpu; dh::safe_cuda(hipGetDeviceCount(&ngpu)); CHECK_GT(ngpu, 1); for (int i = 0; i < ngpu; ++i) { dh::safe_cuda(hipSetDevice(i)); } } } // namespace tree } // namespace xgboost
c9f794195f9298f79bf0cf4b6db52f54e6e86338.cu
/*! * Copyright 2017 XGBoost contributors */ #include <thrust/device_vector.h> #include <xgboost/base.h> #include "../helpers.h" #include "gtest/gtest.h" #include "../../../src/data/sparse_page_source.h" #include "../../../src/gbm/gbtree_model.h" #include "../../../src/tree/updater_gpu_hist.cu" #include "../../../src/common/common.h" namespace xgboost { namespace tree { TEST(gpu_hist_experimental, TestSparseShard) { int rows = 100; int columns = 80; int max_bins = 4; auto dmat = CreateDMatrix(rows, columns, 0.9f); common::GHistIndexMatrix gmat; gmat.Init((*dmat).get(),max_bins); TrainParam p; p.max_depth = 6; dmlc::DataIter<SparsePage>* iter = (*dmat)->RowIterator(); iter->BeforeFirst(); CHECK(iter->Next()); const SparsePage& batch = iter->Value(); DeviceShard shard(0, 0, 0, rows, p); shard.InitRowPtrs(batch); shard.InitCompressedData(gmat.cut, batch); CHECK(!iter->Next()); ASSERT_LT(shard.row_stride, columns); auto host_gidx_buffer = shard.gidx_buffer.AsVector(); common::CompressedIterator<uint32_t> gidx(host_gidx_buffer.data(), gmat.cut.row_ptr.back() + 1); for (int i = 0; i < rows; i++) { int row_offset = 0; for (auto j = gmat.row_ptr[i]; j < gmat.row_ptr[i + 1]; j++) { ASSERT_EQ(gidx[i * shard.row_stride + row_offset], gmat.index[j]); row_offset++; } for (; row_offset < shard.row_stride; row_offset++) { ASSERT_EQ(gidx[i * shard.row_stride + row_offset], shard.null_gidx_value); } } delete dmat; } TEST(gpu_hist_experimental, TestDenseShard) { int rows = 100; int columns = 80; int max_bins = 4; auto dmat = CreateDMatrix(rows, columns, 0); common::GHistIndexMatrix gmat; gmat.Init((*dmat).get(),max_bins); TrainParam p; p.max_depth = 6; dmlc::DataIter<SparsePage>* iter = (*dmat)->RowIterator(); iter->BeforeFirst(); CHECK(iter->Next()); const SparsePage& batch = iter->Value(); DeviceShard shard(0, 0, 0, rows, p); shard.InitRowPtrs(batch); shard.InitCompressedData(gmat.cut, batch); CHECK(!iter->Next()); ASSERT_EQ(shard.row_stride, columns); auto host_gidx_buffer = shard.gidx_buffer.AsVector(); common::CompressedIterator<uint32_t> gidx(host_gidx_buffer.data(), gmat.cut.row_ptr.back() + 1); for (int i = 0; i < gmat.index.size(); i++) { ASSERT_EQ(gidx[i], gmat.index[i]); } delete dmat; } TEST(gpu_hist_experimental, MGPU_mock) { // Attempt to choose multiple GPU devices int ngpu; dh::safe_cuda(cudaGetDeviceCount(&ngpu)); CHECK_GT(ngpu, 1); for (int i = 0; i < ngpu; ++i) { dh::safe_cuda(cudaSetDevice(i)); } } } // namespace tree } // namespace xgboost
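// ---------------------------------------------------------------------------
// Hedged sketch (not part of the test file above): a host-only illustration
// of the layout TestSparseShard asserts -- each row's bin indices are packed
// to the left of a fixed row_stride and the tail is filled with a null
// marker. The function and variable names are illustrative, not xgboost APIs.
// ---------------------------------------------------------------------------
#include <vector>

std::vector<int> pack_rows(const std::vector<std::vector<int>>& rows,
                           int row_stride, int null_value)
{
    std::vector<int> out(rows.size() * row_stride, null_value);
    for (size_t r = 0; r < rows.size(); ++r)
        for (size_t j = 0; j < rows[r].size(); ++j)
            out[r * row_stride + j] = rows[r][j];   // left-packed entries
    return out;
}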
d0649d5639d5c3bdfb0b8e4acf3e99d881bccc37.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ex1.h" #define HIST_LENGTH 256 #define IMG_SIZE IMG_WIDTH*IMG_HEIGHT __device__ void prefix_sum(int arr[], int arr_size) { int threadID = threadIdx.x; int offset = 1; int last = arr[arr_size-1]; for(int level = arr_size / 2; level > 0; level /= 2) { if(threadID < level) { arr[offset * (2 * threadID + 2) - 1] += arr[offset * (2 * threadID + 1) - 1]; } offset *= 2; __syncthreads(); } if(threadID == 0) { arr[arr_size - 1] = 0; } for(int level = 1; level < arr_size; level *= 2) { offset /= 2; __syncthreads(); if(threadID < level) { int temp = arr[offset * (2 * threadID + 1) - 1]; arr[offset * (2 * threadID + 1) - 1] = arr[offset * (2 * threadID + 2) - 1]; arr[offset * (2 * threadID + 2) - 1] += temp; } } __syncthreads(); if(threadID == 0){ for(int i=0; i<arr_size-1;i++){ arr[i]=arr[i+1]; } arr[arr_size-1]= arr[arr_size-1]+last; } return; } //serial /*__global__ void process_image_kernel(uchar *all_in, uchar *all_out) { __shared__ int hist[HIST_LENGTH]; int threadID = threadIdx.x; int blockSize = blockDim.x; if(threadID < HIST_LENGTH) { hist[threadID] = 0; } // Create the histogram for(int i = threadID; i < IMG_SIZE; i += blockSize) { atomicAdd(&hist[all_in[i]], 1); } __syncthreads(); // Create the CDF prefix_sum(hist, HIST_LENGTH); __syncthreads(); // Create the map if(threadID < HIST_LENGTH) { hist[threadID] = (HIST_LENGTH / N_COLORS) * (int)(N_COLORS * (float)hist[threadID] / (IMG_WIDTH * IMG_HEIGHT)); } __syncthreads(); // Compute the new image for(int i = threadID; i < IMG_SIZE; i += blockSize) { all_out[i] = hist[all_in[i]]; } return; }*/ //block __global__ void process_image_kernel(uchar *all_in, uchar *all_out) { __shared__ int hist[HIST_LENGTH]; int threadID = threadIdx.x; int blockID = blockIdx.x; int blockSize = blockDim.x; if(threadID < HIST_LENGTH) { hist[threadID] = 0; } __syncthreads(); // Create the histogram for(int i = threadID; i < IMG_SIZE; i += blockSize) { atomicAdd(&hist[all_in[i + blockID*IMG_SIZE]], 1); } __syncthreads(); // Create the CDF prefix_sum(hist, HIST_LENGTH); __syncthreads(); // Create the map if(threadID < HIST_LENGTH) { hist[threadID] = (HIST_LENGTH / N_COLORS) * (int)(N_COLORS * (float)hist[threadID] / (IMG_SIZE)); } __syncthreads(); // Compute the new image for(int i = threadID; i < IMG_SIZE; i += blockSize) { all_out[i + blockID*IMG_SIZE] = hist[all_in[i + blockID*IMG_SIZE]]; } return; } /* Task serial context struct with necessary CPU / GPU pointers to process a single image */ struct task_serial_context { uchar *all_in, *all_out; }; /* Allocate GPU memory for a single input image and a single output image. * * Returns: allocated and initialized task_serial_context. 
*/ struct task_serial_context *task_serial_init() { auto context = new task_serial_context; CUDA_CHECK(hipMalloc((void**)&context->all_in, IMG_SIZE*sizeof(uchar))); CUDA_CHECK(hipMalloc((void**)&context->all_out, IMG_SIZE*sizeof(uchar))); return context; } /* Process all the images in the given host array and return the output in the * provided output host array */ void task_serial_process(struct task_serial_context *context, uchar *images_in, uchar *images_out) { for(int imageIdx = 0; imageIdx < N_IMAGES; imageIdx++) { CUDA_CHECK(hipMemcpy((void*)context->all_in, (void*)(images_in + imageIdx * IMG_SIZE), IMG_SIZE*sizeof(uchar), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( process_image_kernel), dim3(1), dim3(1024), 0, 0, context->all_in, context->all_out); CUDA_CHECK(hipDeviceSynchronize()); CUDA_CHECK(hipMemcpy((void*)(images_out + imageIdx * IMG_SIZE), (void*)context->all_out, IMG_SIZE*sizeof(uchar), hipMemcpyDeviceToHost)); } } /* Release allocated resources for the task-serial implementation. */ void task_serial_free(struct task_serial_context *context) { CUDA_CHECK(hipFree((void*)context->all_in)); CUDA_CHECK(hipFree((void*)context->all_out)); free(context); } /* Bulk GPU context struct with necessary CPU / GPU pointers to process all the images */ struct gpu_bulk_context { uchar *all_in, *all_out; }; /* Allocate GPU memory for all the input and output images. * * Returns: allocated and initialized gpu_bulk_context. */ struct gpu_bulk_context *gpu_bulk_init() { auto context = new gpu_bulk_context; CUDA_CHECK(hipMalloc((void**)&context->all_in, N_IMAGES*IMG_SIZE*sizeof(uchar))); CUDA_CHECK(hipMalloc((void**)&context->all_out, N_IMAGES*IMG_SIZE*sizeof(uchar))); return context; } /* Process all the images in the given host array and return the output in the * provided output host array */ void gpu_bulk_process(struct gpu_bulk_context *context, uchar *images_in, uchar *images_out) { CUDA_CHECK(hipMemcpy((void*)context->all_in, (void*)(images_in), N_IMAGES*IMG_SIZE*sizeof(uchar), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( process_image_kernel), dim3(N_IMAGES), dim3(1024), 0, 0, context->all_in, context->all_out); CUDA_CHECK(hipDeviceSynchronize()); CUDA_CHECK(hipMemcpy((void*)(images_out), (void*)context->all_out, N_IMAGES*IMG_SIZE*sizeof(uchar), hipMemcpyDeviceToHost)); } /* Release allocated resources for the bulk GPU implementation. */ void gpu_bulk_free(struct gpu_bulk_context *context) { CUDA_CHECK(hipFree((void*)context->all_in)); CUDA_CHECK(hipFree((void*)context->all_out)); free(context); }
d0649d5639d5c3bdfb0b8e4acf3e99d881bccc37.cu
#include "ex1.h" #define HIST_LENGTH 256 #define IMG_SIZE IMG_WIDTH*IMG_HEIGHT __device__ void prefix_sum(int arr[], int arr_size) { int threadID = threadIdx.x; int offset = 1; int last = arr[arr_size-1]; for(int level = arr_size / 2; level > 0; level /= 2) { if(threadID < level) { arr[offset * (2 * threadID + 2) - 1] += arr[offset * (2 * threadID + 1) - 1]; } offset *= 2; __syncthreads(); } if(threadID == 0) { arr[arr_size - 1] = 0; } for(int level = 1; level < arr_size; level *= 2) { offset /= 2; __syncthreads(); if(threadID < level) { int temp = arr[offset * (2 * threadID + 1) - 1]; arr[offset * (2 * threadID + 1) - 1] = arr[offset * (2 * threadID + 2) - 1]; arr[offset * (2 * threadID + 2) - 1] += temp; } } __syncthreads(); if(threadID == 0){ for(int i=0; i<arr_size-1;i++){ arr[i]=arr[i+1]; } arr[arr_size-1]= arr[arr_size-1]+last; } return; } //serial /*__global__ void process_image_kernel(uchar *all_in, uchar *all_out) { __shared__ int hist[HIST_LENGTH]; int threadID = threadIdx.x; int blockSize = blockDim.x; if(threadID < HIST_LENGTH) { hist[threadID] = 0; } // Create the histogram for(int i = threadID; i < IMG_SIZE; i += blockSize) { atomicAdd(&hist[all_in[i]], 1); } __syncthreads(); // Create the CDF prefix_sum(hist, HIST_LENGTH); __syncthreads(); // Create the map if(threadID < HIST_LENGTH) { hist[threadID] = (HIST_LENGTH / N_COLORS) * (int)(N_COLORS * (float)hist[threadID] / (IMG_WIDTH * IMG_HEIGHT)); } __syncthreads(); // Compute the new image for(int i = threadID; i < IMG_SIZE; i += blockSize) { all_out[i] = hist[all_in[i]]; } return; }*/ //block __global__ void process_image_kernel(uchar *all_in, uchar *all_out) { __shared__ int hist[HIST_LENGTH]; int threadID = threadIdx.x; int blockID = blockIdx.x; int blockSize = blockDim.x; if(threadID < HIST_LENGTH) { hist[threadID] = 0; } __syncthreads(); // Create the histogram for(int i = threadID; i < IMG_SIZE; i += blockSize) { atomicAdd(&hist[all_in[i + blockID*IMG_SIZE]], 1); } __syncthreads(); // Create the CDF prefix_sum(hist, HIST_LENGTH); __syncthreads(); // Create the map if(threadID < HIST_LENGTH) { hist[threadID] = (HIST_LENGTH / N_COLORS) * (int)(N_COLORS * (float)hist[threadID] / (IMG_SIZE)); } __syncthreads(); // Compute the new image for(int i = threadID; i < IMG_SIZE; i += blockSize) { all_out[i + blockID*IMG_SIZE] = hist[all_in[i + blockID*IMG_SIZE]]; } return; } /* Task serial context struct with necessary CPU / GPU pointers to process a single image */ struct task_serial_context { uchar *all_in, *all_out; }; /* Allocate GPU memory for a single input image and a single output image. * * Returns: allocated and initialized task_serial_context. 
*/ struct task_serial_context *task_serial_init() { auto context = new task_serial_context; CUDA_CHECK(cudaMalloc((void**)&context->all_in, IMG_SIZE*sizeof(uchar))); CUDA_CHECK(cudaMalloc((void**)&context->all_out, IMG_SIZE*sizeof(uchar))); return context; } /* Process all the images in the given host array and return the output in the * provided output host array */ void task_serial_process(struct task_serial_context *context, uchar *images_in, uchar *images_out) { for(int imageIdx = 0; imageIdx < N_IMAGES; imageIdx++) { CUDA_CHECK(cudaMemcpy((void*)context->all_in, (void*)(images_in + imageIdx * IMG_SIZE), IMG_SIZE*sizeof(uchar), cudaMemcpyHostToDevice)); process_image_kernel<<<1, 1024>>>(context->all_in, context->all_out); CUDA_CHECK(cudaDeviceSynchronize()); CUDA_CHECK(cudaMemcpy((void*)(images_out + imageIdx * IMG_SIZE), (void*)context->all_out, IMG_SIZE*sizeof(uchar), cudaMemcpyDeviceToHost)); } } /* Release allocated resources for the task-serial implementation. */ void task_serial_free(struct task_serial_context *context) { CUDA_CHECK(cudaFree((void*)context->all_in)); CUDA_CHECK(cudaFree((void*)context->all_out)); free(context); } /* Bulk GPU context struct with necessary CPU / GPU pointers to process all the images */ struct gpu_bulk_context { uchar *all_in, *all_out; }; /* Allocate GPU memory for all the input and output images. * * Returns: allocated and initialized gpu_bulk_context. */ struct gpu_bulk_context *gpu_bulk_init() { auto context = new gpu_bulk_context; CUDA_CHECK(cudaMalloc((void**)&context->all_in, N_IMAGES*IMG_SIZE*sizeof(uchar))); CUDA_CHECK(cudaMalloc((void**)&context->all_out, N_IMAGES*IMG_SIZE*sizeof(uchar))); return context; } /* Process all the images in the given host array and return the output in the * provided output host array */ void gpu_bulk_process(struct gpu_bulk_context *context, uchar *images_in, uchar *images_out) { CUDA_CHECK(cudaMemcpy((void*)context->all_in, (void*)(images_in), N_IMAGES*IMG_SIZE*sizeof(uchar), cudaMemcpyHostToDevice)); process_image_kernel<<<N_IMAGES, 1024>>>(context->all_in, context->all_out); CUDA_CHECK(cudaDeviceSynchronize()); CUDA_CHECK(cudaMemcpy((void*)(images_out), (void*)context->all_out, N_IMAGES*IMG_SIZE*sizeof(uchar), cudaMemcpyDeviceToHost)); } /* Release allocated resources for the bulk GPU implementation. */ void gpu_bulk_free(struct gpu_bulk_context *context) { CUDA_CHECK(cudaFree((void*)context->all_in)); CUDA_CHECK(cudaFree((void*)context->all_out)); free(context); }
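// ---------------------------------------------------------------------------
// Hedged sketch (not part of the pair above): a host reference for the
// per-image mapping process_image_kernel computes -- histogram, inclusive CDF
// (prefix_sum above ends with the left-shift that turns its exclusive scan
// into an inclusive one), then the quantized map. Useful for checking
// images_out element by element; the function name is an assumption, and
// plain unsigned char stands in for the uchar typedef from ex1.h.
// ---------------------------------------------------------------------------
static void equalize_reference(const unsigned char* in, unsigned char* out,
                               int img_size, int n_colors)
{
    int hist[256] = {0};
    for (int i = 0; i < img_size; ++i) hist[in[i]]++;

    int cdf = 0, map[256];
    for (int v = 0; v < 256; ++v) {
        cdf += hist[v];                                            // inclusive CDF
        map[v] = (256 / n_colors) * (int)(n_colors * (float)cdf / img_size);
    }

    for (int i = 0; i < img_size; ++i)
        out[i] = (unsigned char)map[in[i]];
}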
0d4492a8200c4defe9fe275167e72b006afe52dc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> using namespace std; #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %d %s %s %d\n", code, hipGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void vecAddKernel(float *A, float *B, float *C, int n) { size_t index = blockIdx.x * blockDim.x + threadIdx.x; size_t stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) C[i] = A[i] + B[i]; } void vecAdd(float *h_arrayA, float *h_arrayB, float *h_arrayC, int n) { hipLaunchKernelGGL(( vecAddKernel), dim3(65535), dim3(1024), 0, 0, h_arrayA, h_arrayB, h_arrayC, n); hipDeviceSynchronize(); } int main() { unsigned long long size = 1 << 28; cout << size << endl; float *arrayA; float *arrayB; float *arrayC; gpuErrchk(hipMallocManaged(&arrayA, size * sizeof(float))); gpuErrchk(hipMallocManaged(&arrayB, size * sizeof(float))); gpuErrchk(hipMallocManaged(&arrayC, size * sizeof(float))); for (int i = 0; i < size; i++) { arrayA[i] = 1.0f; arrayB[i] = 2.0f; } vecAdd(arrayA, arrayB, arrayC, size); cout << arrayC[0] << ' '<<arrayC[size - 1 ] << endl; hipFree(arrayA); hipFree(arrayB); hipFree(arrayC); }
0d4492a8200c4defe9fe275167e72b006afe52dc.cu
#include <iostream> using namespace std; #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %d %s %s %d\n", code, cudaGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void vecAddKernel(float *A, float *B, float *C, int n) { size_t index = blockIdx.x * blockDim.x + threadIdx.x; size_t stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) C[i] = A[i] + B[i]; } void vecAdd(float *h_arrayA, float *h_arrayB, float *h_arrayC, int n) { vecAddKernel<<<65535, 1024>>>(h_arrayA, h_arrayB, h_arrayC, n); cudaDeviceSynchronize(); } int main() { unsigned long long size = 1 << 28; cout << size << endl; float *arrayA; float *arrayB; float *arrayC; gpuErrchk(cudaMallocManaged(&arrayA, size * sizeof(float))); gpuErrchk(cudaMallocManaged(&arrayB, size * sizeof(float))); gpuErrchk(cudaMallocManaged(&arrayC, size * sizeof(float))); for (int i = 0; i < size; i++) { arrayA[i] = 1.0f; arrayB[i] = 2.0f; } vecAdd(arrayA, arrayB, arrayC, size); cout << arrayC[0] << ' '<<arrayC[size - 1 ] << endl; cudaFree(arrayA); cudaFree(arrayB); cudaFree(arrayC); }
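// ---------------------------------------------------------------------------
// Hedged sketch (not part of the pair above): with cudaMallocManaged the
// first kernel touch pays on-demand page migration, which lands inside the
// vecAdd call. Prefetching the managed buffers to the GPU before the launch
// (and the result back before the host read) usually removes that cost.
// The wrapper name, device ordinal 0, and use of the default stream are
// assumptions.
// ---------------------------------------------------------------------------
static void prefetch_managed(const float* p, size_t bytes, int dst_device)
{
    // dst_device is a GPU ordinal, or cudaCpuDeviceId for host memory
    cudaMemPrefetchAsync(p, bytes, dst_device, 0);
}
// usage before vecAdd():  prefetch_managed(arrayA, size * sizeof(float), 0);
//                         prefetch_managed(arrayB, size * sizeof(float), 0);
//                         prefetch_managed(arrayC, size * sizeof(float), 0);
// and before reading arrayC on the host:
//                         prefetch_managed(arrayC, size * sizeof(float), cudaCpuDeviceId);
//                         cudaDeviceSynchronize();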
05486389c5b9088386ba036f42af5f14c0875357.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #ifdef GLOBAL __device__ char x = 0; #endif __global__ void racey_kernel() { #ifdef SHARED __shared__ char x; #endif if (threadIdx.x == 0 && blockIdx.x == 0) #ifdef WW x = threadIdx.x + blockIdx.x; #elif RW volatile char c = x; #endif __syncthreads(); if (threadIdx.x == 32 || blockIdx.x == 1) x = threadIdx.x; } int main() { hipLaunchKernelGGL(( racey_kernel), dim3(BLOCKS),dim3(THREADS), 0, 0, ); hipDeviceReset(); return 0; }
05486389c5b9088386ba036f42af5f14c0875357.cu
#include <stdio.h> #ifdef GLOBAL __device__ char x = 0; #endif __global__ void racey_kernel() { #ifdef SHARED __shared__ char x; #endif if (threadIdx.x == 0 && blockIdx.x == 0) #ifdef WW x = threadIdx.x + blockIdx.x; #elif RW volatile char c = x; #endif __syncthreads(); if (threadIdx.x == 32 || blockIdx.x == 1) x = threadIdx.x; } int main() { racey_kernel<<<BLOCKS,THREADS>>>(); cudaDeviceReset(); return 0; }
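// ---------------------------------------------------------------------------
// Hedged sketch (not part of the pair above): the kernel is deliberately racy
// (the WW/RW and SHARED/GLOBAL macros pick which hazard to exhibit), which
// makes it a natural target for a detector such as
// `compute-sanitizer --tool racecheck` (shared-memory hazards). One way to
// remove the undefined behaviour while keeping both writers is to route the
// conflicting stores through an atomic; the final value is still
// nondeterministic, but it is no longer a data race. Names are illustrative.
// ---------------------------------------------------------------------------
__device__ int gx_atomic = 0;

__global__ void racey_kernel_atomic()
{
    if (threadIdx.x == 0 && blockIdx.x == 0)
        atomicExch(&gx_atomic, (int)(threadIdx.x + blockIdx.x));
    __syncthreads();
    if (threadIdx.x == 32 || blockIdx.x == 1)
        atomicExch(&gx_atomic, (int)threadIdx.x);
}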
24d573b4dcf0621f8ce1bd754239229a3a7e4fb7.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
    -- MAGMA (version 2.1.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date August 2016

       @generated from sparse-iter/blas/zgedensereimsplit.cu, normal z -> d, Tue Aug 30 09:38:46 2016

*/
#include "magmasparse_internal.h"

#define BLOCK_SIZE 256

// axpy kernel for matrices stored in the MAGMA format
__global__ void
dgedensereimsplit_kernel(
    int num_rows,
    int num_cols,
    magma_index_t* rowidx,
    double * A,
    double * ReA,
    double * ImA )
{
    int row = blockIdx.x*blockDim.x+threadIdx.x;
    int j;

    if( row<num_rows ){
        for( j=0; j<num_cols; j++ ){
            ReA[ j ] = MAGMA_D_MAKE( MAGMA_D_REAL( A[ j ] ), 0.0 );
            ImA[ j ] = MAGMA_D_MAKE( MAGMA_D_IMAG( A[ j ] ), 0.0 );
        }
    }
}

/**
    Purpose
    -------
    This routine takes an input matrix A in DENSE format and located on the GPU
    and splits it into two matrices ReA and ImA containing the real and the
    imaginary contributions of A.
    The output matrices are allocated within the routine.

    Arguments
    ---------

    @param[in]
    A           magma_d_matrix
                input matrix A.

    @param[out]
    ReA         magma_d_matrix*
                output matrix containing the real contributions.

    @param[out]
    ImA         magma_d_matrix*
                output matrix containing the imaginary contributions.

    @param[in]
    queue       magma_queue_t
                Queue to execute in.

    @ingroup magmasparse_dblas
    ********************************************************************/

extern "C"
magma_int_t
magma_dgedensereimsplit(
    magma_d_matrix A,
    magma_d_matrix *ReA,
    magma_d_matrix *ImA,
    magma_queue_t queue )
{
    magma_dmtransfer( A, ReA, Magma_DEV, Magma_DEV, queue );
    magma_dmtransfer( A, ImA, Magma_DEV, Magma_DEV, queue );

    int m = A.num_rows;
    int n = A.num_cols;

    dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
    magma_int_t threads = BLOCK_SIZE;
    hipLaunchKernelGGL(( dgedensereimsplit_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,  m, n, A.row, A.dval, ReA->dval, ImA->dval );

    return MAGMA_SUCCESS;
}
24d573b4dcf0621f8ce1bd754239229a3a7e4fb7.cu
/*
    -- MAGMA (version 2.1.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date August 2016

       @generated from sparse-iter/blas/zgedensereimsplit.cu, normal z -> d, Tue Aug 30 09:38:46 2016

*/
#include "magmasparse_internal.h"

#define BLOCK_SIZE 256

// axpy kernel for matrices stored in the MAGMA format
__global__ void
dgedensereimsplit_kernel(
    int num_rows,
    int num_cols,
    magma_index_t* rowidx,
    double * A,
    double * ReA,
    double * ImA )
{
    int row = blockIdx.x*blockDim.x+threadIdx.x;
    int j;

    if( row<num_rows ){
        for( j=0; j<num_cols; j++ ){
            ReA[ j ] = MAGMA_D_MAKE( MAGMA_D_REAL( A[ j ] ), 0.0 );
            ImA[ j ] = MAGMA_D_MAKE( MAGMA_D_IMAG( A[ j ] ), 0.0 );
        }
    }
}

/**
    Purpose
    -------
    This routine takes an input matrix A in DENSE format and located on the GPU
    and splits it into two matrices ReA and ImA containing the real and the
    imaginary contributions of A.
    The output matrices are allocated within the routine.

    Arguments
    ---------

    @param[in]
    A           magma_d_matrix
                input matrix A.

    @param[out]
    ReA         magma_d_matrix*
                output matrix containing the real contributions.

    @param[out]
    ImA         magma_d_matrix*
                output matrix containing the imaginary contributions.

    @param[in]
    queue       magma_queue_t
                Queue to execute in.

    @ingroup magmasparse_dblas
    ********************************************************************/

extern "C"
magma_int_t
magma_dgedensereimsplit(
    magma_d_matrix A,
    magma_d_matrix *ReA,
    magma_d_matrix *ImA,
    magma_queue_t queue )
{
    magma_dmtransfer( A, ReA, Magma_DEV, Magma_DEV, queue );
    magma_dmtransfer( A, ImA, Magma_DEV, Magma_DEV, queue );

    int m = A.num_rows;
    int n = A.num_cols;

    dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
    magma_int_t threads = BLOCK_SIZE;
    dgedensereimsplit_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
                    ( m, n, A.row, A.dval, ReA->dval, ImA->dval );

    return MAGMA_SUCCESS;
}
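// ---------------------------------------------------------------------------
// Hedged sketch (not upstream MAGMA code): in the kernel above every thread
// with row < num_rows reads and writes the same leading num_cols entries of
// A, ReA and ImA, because all three are indexed by j alone. If a per-row
// split of a dense row-major matrix is what is intended, the accesses would
// need a row offset, as in the assumed variant below.
// ---------------------------------------------------------------------------
__global__ void
dgedensereimsplit_rowwise_kernel(
    int num_rows,
    int num_cols,
    const double* A,
    double* ReA,
    double* ImA )
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;

    if (row < num_rows) {
        for (int j = 0; j < num_cols; j++) {
            int k = row * num_cols + j;   // dense row-major element (row, j)
            ReA[k] = MAGMA_D_MAKE(MAGMA_D_REAL(A[k]), 0.0);
            ImA[k] = MAGMA_D_MAKE(MAGMA_D_IMAG(A[k]), 0.0);
        }
    }
}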
83e305ce7e9ae1c1b2ecce6d602520637b2e2a2b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <cstdlib> #include <math.h> #include <time.h> #define MINVAL 0.00 #define MAXVAL 10.0 #define TOL 1e-5 #define NUM_THREADS 16 double CPS = 2.9e9; int LEN; // to be defined via cmd args //////////////////////////// CUDA RELATED //////////////////////////////////// // Assertion to check for errors #define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void MMM_kernel(float* A, float* B, float* dst, int len) { __shared__ float Ms [NUM_THREADS][NUM_THREADS]; __shared__ float Ns [NUM_THREADS][NUM_THREADS]; int bx, by, tx, ty, row, col; bx = blockIdx.x; by = blockIdx.y; tx = threadIdx.x; ty = threadIdx.y; row = by * NUM_THREADS + ty; col = bx * NUM_THREADS + tx; float partial = 0; for(int k = 0; k < len/NUM_THREADS; k++) { Ms[ty][tx] = A[row * len + (k * NUM_THREADS + tx)]; Ns[ty][tx] = B[col + (k * NUM_THREADS + ty) * len]; __syncthreads(); for(int r = 0; r < NUM_THREADS; r++) partial += Ms[ty][r] * Ns[r][tx]; __syncthreads(); } dst[row * len + col] = partial; } ////////////////////////////// MATRIX ///////////////////////////////////////// float* matrix_create(int len); int matrix_init(float* mat, int len); int matrix_zero(float* mat, int len); int matrix_copy(float* src, float* dst, int len); void MMM_CPU(float* A, float* B, float* dst, int len); ///////////////// Time related ////////////////////////////// //rdtsc related typedef union { unsigned long long int64; struct {unsigned int lo, hi;} int32; } mcps_tctr; #define MCPS_RDTSC(cpu_c) __asm__ __volatile__ ("rdtsc" : \ "=a" ((cpu_c).int32.lo), "=d"((cpu_c).int32.hi)) int clock_gettime(clockid_t clk_id, struct timespec *tp); struct timespec diff(struct timespec start, struct timespec end); double ts_ms(struct timespec ts); struct timespec ts_diff(struct timespec start, struct timespec end); double measure_cps(void); //////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char *argv[]) { if(argc != 2) { printf("\nPlease pass a length in.\n"); return 0; } LEN = strtol(argv[1], NULL, 10); if(LEN <= 0) { printf("\nLength must be greater than zero\n"); return 0; } int size = LEN * LEN * sizeof(float); int NUM_BLOCKS = LEN / NUM_THREADS; if(LEN % NUM_THREADS != 0) // die if not a good fit { printf("\nOdd Numbr of blocks\n"); return 0; } // CUDA Timing hipEvent_t start_full, start_mmm, stop_full, stop_mmm; float d_time_full, d_time_mmm; // CPU Timing struct timespec time1, time2; double h_time; // CPU set up float *h_A, *h_B, *h_dst_gpu, *h_dst_cpu, *d_A, *d_B, *d_dst; measure_cps(); h_A = matrix_create(LEN); if(!h_A) return 0; if(!matrix_init(h_A, LEN)) return 0; h_B = matrix_create(LEN); if(!h_B) return 0; if(!matrix_init(h_B, LEN)) return 0; h_dst_cpu = matrix_create(LEN); // cpu result if(!h_dst_cpu) return 0; if(!matrix_zero(h_dst_cpu, LEN)) return 0; h_dst_gpu = matrix_create(LEN); // gpu result if(!h_dst_gpu) return 0; if(!matrix_zero(h_dst_gpu, LEN)) return 0; // GPU Set up d_A = NULL; d_B = NULL; d_dst = NULL; CUDA_SAFE_CALL(hipSetDevice(0)); CUDA_SAFE_CALL(hipMalloc((void**)&d_A, size)); CUDA_SAFE_CALL(hipMalloc((void**)&d_B, size)); 
CUDA_SAFE_CALL(hipMalloc((void**)&d_dst, size));

    hipEventCreate(&start_full);
    hipEventCreate(&start_mmm);
    hipEventCreate(&stop_full);
    hipEventCreate(&stop_mmm);

    // start the GPU calculations
    dim3 dimBlock(NUM_THREADS, NUM_THREADS, 1);
    dim3 dimGrid(NUM_BLOCKS, NUM_BLOCKS, 1);

    hipEventRecord(start_full,0);

    CUDA_SAFE_CALL(hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice));
    CUDA_SAFE_CALL(hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice));

    hipEventRecord(start_mmm,0);
    hipLaunchKernelGGL(( MMM_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_dst, LEN);
    hipEventRecord(stop_mmm,0);
    hipEventSynchronize(stop_mmm);

    CUDA_SAFE_CALL(hipPeekAtLastError());
    CUDA_SAFE_CALL(hipDeviceSynchronize());

    CUDA_SAFE_CALL(hipMemcpy(h_dst_gpu, d_dst, size, hipMemcpyDeviceToHost));

    hipEventRecord(stop_full, 0);
    hipEventSynchronize(stop_full);

    hipEventElapsedTime(&d_time_mmm, start_mmm, stop_mmm);
    hipEventElapsedTime(&d_time_full, start_full, stop_full);
    printf("\nGPU MMM Time: %f ms", d_time_mmm);
    printf("\nGPU Full Time: %f ms", d_time_full);

    hipEventDestroy(start_full);
    hipEventDestroy(stop_full);

    //CPU calculation
    clock_gettime(CLOCK_REALTIME, &time1);
    MMM_CPU(h_A, h_B, h_dst_cpu, LEN);
    clock_gettime(CLOCK_REALTIME, &time2);
    h_time = ts_ms(ts_diff(time1, time2));
    printf("\nCPU Time: %lf ms\n", h_time);

    int i, num_elements;
    num_elements = LEN * LEN;
    for(i = 0; i < num_elements; i++)
    {
        // compare the elements, not the array pointers, using the absolute error
        if(fabsf(h_dst_cpu[i] - h_dst_gpu[i]) > (float) TOL)
        {
            printf("\nResult verification issue at element %d | CPU: %f | GPU: %f\n", i, h_dst_cpu[i], h_dst_gpu[i]);
            return 0;
        }
    }

    // Free stuff
    CUDA_SAFE_CALL(hipFree(d_A));
    CUDA_SAFE_CALL(hipFree(d_B));
    CUDA_SAFE_CALL(hipFree(d_dst));

    free(h_A);
    free(h_B);
    free(h_dst_gpu);
    free(h_dst_cpu);

    printf("\nDone\n");
    return 0;
}

/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////// MATRIX IMPLEMENTATIONS ////////////////////////////////////////

float float_rand(float min, float max)
{
    float f = (float)random()/RAND_MAX;
    return min + f * (max - min);
}

float* matrix_create(int len)
{
    float* arr;
    if(len > 0)
    {
        arr = (float*) calloc(len*len, sizeof(float));
        if(!arr)
        {
            printf("\n\tFailed to allocate array\n");
            return NULL;
        }
    }
    else return NULL;

    return arr;
}

int matrix_init(float* mat, int len)
{
    int len_sq, i;
    if(len > 0)
    {
        len_sq = len * len;
        for (i = 0; i < len_sq; i++)
        {
            mat[i] = float_rand(MINVAL, MAXVAL);
        }
        return 1;
    }
    printf("\nError in initializing matrix\n");
    return 0;
}

int matrix_zero(float* mat, int len)
{
    int len_sq, i;
    if(len > 0)
    {
        len_sq = len * len;
        for(i = 0; i < len_sq; i++)
        {
            mat[i] = 0;
        }
        return 1;
    }
    printf("\nFailed to zero matrix\n");
    return 0;
}

int matrix_copy(float* src, float* dst, int len)
{
    int len_sq, i;
    if(len > 0)
    {
        len_sq = len * len;
        for(i = 0; i < len_sq; i++)
        {
            dst[i] = src[i];
        }
        return 1;
    }
    printf("\nFailed to copy matrix\n");
    return 0;
}

void MMM_CPU(float* A, float* B, float* dst, int len)
{
    int i, j, k;
    for (i = 0; i < len; i++)
    {
        for(j = 0; j < len; j++)
        {
            for(k = 0; k < len; k++)
                dst[i * len + j] += A[i * len + k] * B[k * len + j];
        }
    }
}

///////////////////////////// Timing related ///////////////////////////////

double ts_ms(struct timespec ts)
{
    return ((((double)(ts.tv_sec))*1.0e9) + ((double)(ts.tv_nsec)))/(1.0e6);
}

/* ---------------------------------------------------------------------------
| Make the CPU busy, and measure CPS (cycles per second).
| | Explanation: | If tests are very fast, they can run so quickly that the SpeedStep control | (in kernel and/or on-chip) doesn't notice in time, and the first few tests | might finish while the CPU is still in its sleep state (about 800 MHz, | judging from my measurements) | A simple way to get around this is to run some kind of busy-loop that | forces the OS and/or CPU to notice it needs to go to full clock speed. | We print out the results of the computation so the loop won't get optimised | away. | | Copy this code into other programs as desired. It provides three entry | points: | | double ts_sec(ts): converts a timespec into seconds | timespec ts_diff(ts1, ts2): computes interval between two timespecs | measure_cps(): Does the busy loop and prints out measured CPS (cycles/sec) --------------------------------------------------------------------------- */ struct timespec ts_diff(struct timespec start, struct timespec end) { struct timespec temp; if ((end.tv_nsec-start.tv_nsec)<0) { temp.tv_sec = end.tv_sec-start.tv_sec-1; temp.tv_nsec = 1000000000+end.tv_nsec-start.tv_nsec; } else { temp.tv_sec = end.tv_sec-start.tv_sec; temp.tv_nsec = end.tv_nsec-start.tv_nsec; } return temp; } double measure_cps() { struct timespec cal_start, cal_end; mcps_tctr tsc_start, tsc_end; double total_time; double total_cycles; /* We perform a chaotic iteration and print the result, to defeat compiler optimisation */ double chaosC = -1.8464323952913974; double z = 0.0; long int i, ilim, j; /* Do it twice and throw away results from the first time; this ensures the * OS and CPU will notice it's busy and set the clock speed. */ for(j=0; j<2; j++) { clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cal_start); MCPS_RDTSC(tsc_start); ilim = 50*1000*1000; for (i=0; i<ilim; i++) z = z * z + chaosC; clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cal_end); MCPS_RDTSC(tsc_end); } total_time = ts_ms(ts_diff(cal_start, cal_end)); total_cycles = (double)(tsc_end.int64-tsc_start.int64); CPS = total_cycles / total_time; printf("z == %f, CPS == %g\n", z, CPS); return CPS; } /* --------------------------------------------------------------------------- | End of measure_cps code --------------------------------------------------------------------------- */ struct timespec diff(struct timespec start, struct timespec end) { struct timespec temp; if ((end.tv_nsec-start.tv_nsec)<0) { temp.tv_sec = end.tv_sec-start.tv_sec-1; temp.tv_nsec = 1000000000+end.tv_nsec-start.tv_nsec; } else { temp.tv_sec = end.tv_sec-start.tv_sec; temp.tv_nsec = end.tv_nsec-start.tv_nsec; } return temp; }
83e305ce7e9ae1c1b2ecce6d602520637b2e2a2b.cu
#include <cstdio>
#include <cstdlib>
#include <math.h>
#include <time.h>

#define MINVAL 0.00
#define MAXVAL 10.0
#define TOL 1e-5
#define NUM_THREADS 16

double CPS = 2.9e9;
int LEN; // to be defined via cmd args

//////////////////////////// CUDA RELATED ////////////////////////////////////

// Assertion to check for errors
#define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

// tiled matrix-matrix multiply: each block computes one NUM_THREADS x NUM_THREADS
// tile of dst, staging tiles of A and B through shared memory
__global__ void MMM_kernel(float* A, float* B, float* dst, int len)
{
    __shared__ float Ms [NUM_THREADS][NUM_THREADS];
    __shared__ float Ns [NUM_THREADS][NUM_THREADS];

    int bx, by, tx, ty, row, col;
    bx = blockIdx.x;
    by = blockIdx.y;
    tx = threadIdx.x;
    ty = threadIdx.y;

    row = by * NUM_THREADS + ty;
    col = bx * NUM_THREADS + tx;

    float partial = 0;
    for(int k = 0; k < len/NUM_THREADS; k++)
    {
        Ms[ty][tx] = A[row * len + (k * NUM_THREADS + tx)];
        Ns[ty][tx] = B[col + (k * NUM_THREADS + ty) * len];
        __syncthreads();

        for(int r = 0; r < NUM_THREADS; r++)
            partial += Ms[ty][r] * Ns[r][tx];
        __syncthreads();
    }
    dst[row * len + col] = partial;
}

////////////////////////////// MATRIX /////////////////////////////////////////
float* matrix_create(int len);
int matrix_init(float* mat, int len);
int matrix_zero(float* mat, int len);
int matrix_copy(float* src, float* dst, int len);
void MMM_CPU(float* A, float* B, float* dst, int len);

///////////////// Time related //////////////////////////////
//rdtsc related
typedef union {
    unsigned long long int64;
    struct {unsigned int lo, hi;} int32;
} mcps_tctr;

#define MCPS_RDTSC(cpu_c) __asm__ __volatile__ ("rdtsc" : \
    "=a" ((cpu_c).int32.lo), "=d"((cpu_c).int32.hi))

int clock_gettime(clockid_t clk_id, struct timespec *tp);
struct timespec diff(struct timespec start, struct timespec end);
double ts_ms(struct timespec ts);
struct timespec ts_diff(struct timespec start, struct timespec end);
double measure_cps(void);

////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////

int main(int argc, char *argv[])
{
    if(argc != 2)
    {
        printf("\nPlease pass a length in.\n");
        return 0;
    }

    LEN = strtol(argv[1], NULL, 10);
    if(LEN <= 0)
    {
        printf("\nLength must be greater than zero\n");
        return 0;
    }

    int size = LEN * LEN * sizeof(float);
    int NUM_BLOCKS = LEN / NUM_THREADS;
    if(LEN % NUM_THREADS != 0) // die if not a good fit
    {
        printf("\nLEN is not a multiple of NUM_THREADS\n");
        return 0;
    }

    // CUDA Timing
    cudaEvent_t start_full, start_mmm, stop_full, stop_mmm;
    float d_time_full, d_time_mmm;

    // CPU Timing
    struct timespec time1, time2;
    double h_time;

    // CPU set up
    float *h_A, *h_B, *h_dst_gpu, *h_dst_cpu, *d_A, *d_B, *d_dst;
    measure_cps();

    h_A = matrix_create(LEN);
    if(!h_A) return 0;
    if(!matrix_init(h_A, LEN)) return 0;

    h_B = matrix_create(LEN);
    if(!h_B) return 0;
    if(!matrix_init(h_B, LEN)) return 0;

    h_dst_cpu = matrix_create(LEN); // cpu result
    if(!h_dst_cpu) return 0;
    if(!matrix_zero(h_dst_cpu, LEN)) return 0;

    h_dst_gpu = matrix_create(LEN); // gpu result
    if(!h_dst_gpu) return 0;
    if(!matrix_zero(h_dst_gpu, LEN)) return 0;

    // GPU Set up
    d_A = NULL;
    d_B = NULL;
    d_dst = NULL;
    CUDA_SAFE_CALL(cudaSetDevice(0));
    CUDA_SAFE_CALL(cudaMalloc((void**)&d_A, size));
    CUDA_SAFE_CALL(cudaMalloc((void**)&d_B, size));
    CUDA_SAFE_CALL(cudaMalloc((void**)&d_dst, size));

    cudaEventCreate(&start_full);
    cudaEventCreate(&start_mmm);
    cudaEventCreate(&stop_full);
    cudaEventCreate(&stop_mmm);

    // start the GPU calculations
    dim3 dimBlock(NUM_THREADS, NUM_THREADS, 1);
    dim3 dimGrid(NUM_BLOCKS, NUM_BLOCKS, 1);

    cudaEventRecord(start_full,0);
    CUDA_SAFE_CALL(cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice));
    CUDA_SAFE_CALL(cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice));

    cudaEventRecord(start_mmm,0);
    MMM_kernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_dst, LEN);
    cudaEventRecord(stop_mmm,0);
    cudaEventSynchronize(stop_mmm);

    CUDA_SAFE_CALL(cudaPeekAtLastError());
    CUDA_SAFE_CALL(cudaDeviceSynchronize());
    CUDA_SAFE_CALL(cudaMemcpy(h_dst_gpu, d_dst, size, cudaMemcpyDeviceToHost));

    cudaEventRecord(stop_full, 0);
    cudaEventSynchronize(stop_full);

    cudaEventElapsedTime(&d_time_mmm, start_mmm, stop_mmm);
    cudaEventElapsedTime(&d_time_full, start_full, stop_full);
    printf("\nGPU MMM Time: %f ms", d_time_mmm);
    printf("\nGPU Full Time: %f ms", d_time_full);

    cudaEventDestroy(start_full);
    cudaEventDestroy(stop_full);
    cudaEventDestroy(start_mmm);
    cudaEventDestroy(stop_mmm);

    //CPU calculation
    clock_gettime(CLOCK_REALTIME, &time1);
    MMM_CPU(h_A, h_B, h_dst_cpu, LEN);
    clock_gettime(CLOCK_REALTIME, &time2);
    h_time = ts_ms(ts_diff(time1, time2));
    printf("\nCPU Time: %lf ms\n", h_time);

    int i, num_elements;
    num_elements = LEN * LEN;
    for(i = 0; i < num_elements; i++)
    {
        // compare element-wise, not the raw pointers
        if(fabs(h_dst_cpu[i] - h_dst_gpu[i]) > (float) TOL)
        {
            printf("\nResult verification issue at element %d | CPU: %f | GPU: %f\n",
                   i, h_dst_cpu[i], h_dst_gpu[i]);
            return 0;
        }
    }

    // Free stuff
    CUDA_SAFE_CALL(cudaFree(d_A));
    CUDA_SAFE_CALL(cudaFree(d_B));
    CUDA_SAFE_CALL(cudaFree(d_dst));
    free(h_A);
    free(h_B);
    free(h_dst_gpu);
    free(h_dst_cpu);

    printf("\nDone\n");
    return 0;
}

/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////// MATRIX IMPLEMENTATIONS ////////////////////////////////////////

float float_rand(float min, float max)
{
    float f = (float)random()/RAND_MAX;
    return min + f * (max - min);
}

float* matrix_create(int len)
{
    float* arr;
    if(len > 0)
    {
        arr = (float*) calloc(len*len, sizeof(float));
        if(!arr)
        {
            printf("\n\tFailed to allocate array\n");
            return NULL;
        }
    }
    else return NULL;
    return arr;
}

int matrix_init(float* mat, int len)
{
    int len_sq, i;
    if(len > 0)
    {
        len_sq = len * len;
        for (i = 0; i < len_sq; i++)
        {
            mat[i] = float_rand(MINVAL, MAXVAL);
        }
        return 1;
    }
    printf("\nError in initializing matrix\n");
    return 0;
}

int matrix_zero(float* mat, int len)
{
    int len_sq, i;
    if(len > 0)
    {
        len_sq = len * len;
        for(i = 0; i < len_sq; i++)
        {
            mat[i] = 0;
        }
        return 1;
    }
    printf("\nFailed to zero matrix\n");
    return 0;
}

int matrix_copy(float* src, float* dst, int len)
{
    int len_sq, i;
    if(len > 0)
    {
        len_sq = len * len;
        for(i = 0; i < len_sq; i++)
        {
            dst[i] = src[i];
        }
        return 1;
    }
    printf("\nFailed to copy matrix\n");
    return 0;
}

void MMM_CPU(float* A, float* B, float* dst, int len)
{
    int i, j, k;
    for (i = 0; i < len; i++)
    {
        for(j = 0; j < len; j++)
        {
            for(k = 0; k < len; k++)
                dst[i * len + j] += A[i * len + k] * B[k * len + j];
        }
    }
}

///////////////////////////// Timing related ///////////////////////////////

double ts_ms(struct timespec ts)
{
    return ((((double)(ts.tv_sec))*1.0e9) + ((double)(ts.tv_nsec)))/(1.0e6);
}

/* ---------------------------------------------------------------------------
| Make the CPU busy, and measure CPS (cycles per second).
|
| Explanation:
| If tests are very fast, they can run so quickly that the SpeedStep control
| (in kernel and/or on-chip) doesn't notice in time, and the first few tests
| might finish while the CPU is still in its sleep state (about 800 MHz,
| judging from my measurements)
| A simple way to get around this is to run some kind of busy-loop that
| forces the OS and/or CPU to notice it needs to go to full clock speed.
| We print out the results of the computation so the loop won't get optimised
| away.
|
| Copy this code into other programs as desired. It provides three entry
| points:
|
|   double ts_ms(ts): converts a timespec into milliseconds
|   timespec ts_diff(ts1, ts2): computes interval between two timespecs
|   measure_cps(): Does the busy loop and prints out measured CPS (cycles/sec)
--------------------------------------------------------------------------- */

struct timespec ts_diff(struct timespec start, struct timespec end)
{
    struct timespec temp;
    if ((end.tv_nsec-start.tv_nsec)<0) {
        temp.tv_sec = end.tv_sec-start.tv_sec-1;
        temp.tv_nsec = 1000000000+end.tv_nsec-start.tv_nsec;
    } else {
        temp.tv_sec = end.tv_sec-start.tv_sec;
        temp.tv_nsec = end.tv_nsec-start.tv_nsec;
    }
    return temp;
}

double measure_cps()
{
    struct timespec cal_start, cal_end;
    mcps_tctr tsc_start, tsc_end;
    double total_time;
    double total_cycles;
    /* We perform a chaotic iteration and print the result, to defeat
       compiler optimisation */
    double chaosC = -1.8464323952913974;
    double z = 0.0;
    long int i, ilim, j;

    /* Do it twice and throw away results from the first time; this ensures the
     * OS and CPU will notice it's busy and set the clock speed. */
    for(j=0; j<2; j++)
    {
        clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cal_start);
        MCPS_RDTSC(tsc_start);
        ilim = 50*1000*1000;
        for (i=0; i<ilim; i++)
            z = z * z + chaosC;
        clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cal_end);
        MCPS_RDTSC(tsc_end);
    }

    total_time = ts_ms(ts_diff(cal_start, cal_end));
    total_cycles = (double)(tsc_end.int64-tsc_start.int64);
    CPS = total_cycles / (total_time / 1000.0); /* ts_ms() is milliseconds; CPS is cycles per second */
    printf("z == %f, CPS == %g\n", z, CPS);

    return CPS;
}
/* ---------------------------------------------------------------------------
| End of measure_cps code
--------------------------------------------------------------------------- */

struct timespec diff(struct timespec start, struct timespec end)
{
    struct timespec temp;
    if ((end.tv_nsec-start.tv_nsec)<0) {
        temp.tv_sec = end.tv_sec-start.tv_sec-1;
        temp.tv_nsec = 1000000000+end.tv_nsec-start.tv_nsec;
    } else {
        temp.tv_sec = end.tv_sec-start.tv_sec;
        temp.tv_nsec = end.tv_nsec-start.tv_nsec;
    }
    return temp;
}
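// A minimal sketch of a guarded tile-loading variant for the case the program
// above rejects (LEN not a multiple of NUM_THREADS). MMM_kernel_guarded is a
// hypothetical kernel, not part of the original file; out-of-range lanes load
// zeros so partial tiles contribute nothing to the inner product.
__global__ void MMM_kernel_guarded(float* A, float* B, float* dst, int len)
{
    __shared__ float Ms[NUM_THREADS][NUM_THREADS];
    __shared__ float Ns[NUM_THREADS][NUM_THREADS];
    int row = blockIdx.y * NUM_THREADS + threadIdx.y;
    int col = blockIdx.x * NUM_THREADS + threadIdx.x;
    float partial = 0.0f;
    int num_tiles = (len + NUM_THREADS - 1) / NUM_THREADS; // round up
    for (int k = 0; k < num_tiles; k++)
    {
        int a_col = k * NUM_THREADS + threadIdx.x;
        int b_row = k * NUM_THREADS + threadIdx.y;
        Ms[threadIdx.y][threadIdx.x] = (row < len && a_col < len) ? A[row * len + a_col] : 0.0f;
        Ns[threadIdx.y][threadIdx.x] = (b_row < len && col < len) ? B[b_row * len + col] : 0.0f;
        __syncthreads();
        for (int r = 0; r < NUM_THREADS; r++)
            partial += Ms[threadIdx.y][r] * Ns[r][threadIdx.x];
        __syncthreads();
    }
    if (row < len && col < len)
        dst[row * len + col] = partial;
}
// With this variant the host would launch on a rounded-up grid, e.g.
// dim3 dimGrid((LEN + NUM_THREADS - 1) / NUM_THREADS, (LEN + NUM_THREADS - 1) / NUM_THREADS, 1);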
f812e54b892e2d143bbec764ee8ad562200a8e3d.hip
// !!! This is a file automatically generated by hipify!!!
//#include <stdlib.h>
//#include <string.h>
//#include <stdio.h>
#include "jim.h"
#include "jimautoconf.h"
#include "jim-subcmd.h"

static __device__ int history_cmd_getline(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
#if __HIPCC__
    char *line = nullptr; //Jim_HistoryGetline(Jim_String(argv[0]));
#else
    char *line = Jim_HistoryGetline(Jim_String(argv[0]));
#endif
    // On EOF returns -1 if varName was specified; otherwise the empty string.
    if (line == NULL) {
        if (argc == 2)
            Jim_SetResultInt(interp, -1);
        return JIM_OK;
    }
    Jim_Obj *objPtr = Jim_NewStringObjNoAlloc(interp, line, -1);
    // Returns the length of the string if varName was specified
    if (argc == 2) {
        if (Jim_SetVariable(interp, argv[1], objPtr) != JIM_OK) {
            Jim_FreeNewObj(interp, objPtr);
            return JIM_ERROR;
        }
        Jim_SetResultInt(interp, Jim_Length(objPtr));
    }
    else
        Jim_SetResult(interp, objPtr);
    return JIM_OK;
}

static __device__ int history_cmd_load(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
#if __HIPCC__
    //Jim_HistoryLoad(Jim_String(argv[0]));
#else
    Jim_HistoryLoad(Jim_String(argv[0]));
#endif
    return JIM_OK;
}

static __device__ int history_cmd_save(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
#if __HIPCC__
    //Jim_HistorySave(Jim_String(argv[0]));
#else
    Jim_HistorySave(Jim_String(argv[0]));
#endif
    return JIM_OK;
}

static __device__ int history_cmd_add(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
#if __HIPCC__
    //Jim_HistoryAdd(Jim_String(argv[0]));
#else
    Jim_HistoryAdd(Jim_String(argv[0]));
#endif
    return JIM_OK;
}

static __device__ int history_cmd_show(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
#if __HIPCC__
    //Jim_HistoryShow();
#else
    Jim_HistoryShow();
#endif
    return JIM_OK;
}

__constant__ static const jim_subcmd_type _history_command_table[] = {
    { "getline", "prompt ?varname?", history_cmd_getline, 1, 2 },
    // Description: Reads one line from the user. Similar to gets.
    { "load", "filename", history_cmd_load, 1, 1, },
    // Description: Loads history from the given file, if possible
    { "save", "filename", history_cmd_save, 1, 1 },
    // Description: Saves history to the given file
    { "add", "line", history_cmd_add, 1, 1 },
    // Description: Adds the line to the history and saves
    { "show", NULL, history_cmd_show, 0, 0 },
    // Description: Displays the history
    { NULL }
};

static __device__ int JimHistorySubCmdProc(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    return Jim_CallSubCmd(interp, Jim_ParseSubCmd(interp, _history_command_table, argc, argv), argc, argv);
}

static __device__ void JimHistoryDelProc(ClientData privData, Jim_Interp *interp)
{
    Jim_Free(privData);
}

__device__ int Jim_historyInit(Jim_Interp *interp)
{
    if (Jim_PackageProvide(interp, "history", "1.0", JIM_ERRMSG))
        return JIM_ERROR;
    void **history = (void **)Jim_Alloc(sizeof(*history));
    *history = NULL;
    Jim_CreateCommand(interp, "history", JimHistorySubCmdProc, history, JimHistoryDelProc);
    return JIM_OK;
}
f812e54b892e2d143bbec764ee8ad562200a8e3d.cu
//#include <stdlib.h>
//#include <string.h>
//#include <stdio.h>
#include "jim.h"
#include "jimautoconf.h"
#include "jim-subcmd.h"

static __device__ int history_cmd_getline(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
#if __CUDACC__
    char *line = nullptr; //Jim_HistoryGetline(Jim_String(argv[0]));
#else
    char *line = Jim_HistoryGetline(Jim_String(argv[0]));
#endif
    // On EOF returns -1 if varName was specified; otherwise the empty string.
    if (line == NULL) {
        if (argc == 2)
            Jim_SetResultInt(interp, -1);
        return JIM_OK;
    }
    Jim_Obj *objPtr = Jim_NewStringObjNoAlloc(interp, line, -1);
    // Returns the length of the string if varName was specified
    if (argc == 2) {
        if (Jim_SetVariable(interp, argv[1], objPtr) != JIM_OK) {
            Jim_FreeNewObj(interp, objPtr);
            return JIM_ERROR;
        }
        Jim_SetResultInt(interp, Jim_Length(objPtr));
    }
    else
        Jim_SetResult(interp, objPtr);
    return JIM_OK;
}

static __device__ int history_cmd_load(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
#if __CUDACC__
    //Jim_HistoryLoad(Jim_String(argv[0]));
#else
    Jim_HistoryLoad(Jim_String(argv[0]));
#endif
    return JIM_OK;
}

static __device__ int history_cmd_save(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
#if __CUDACC__
    //Jim_HistorySave(Jim_String(argv[0]));
#else
    Jim_HistorySave(Jim_String(argv[0]));
#endif
    return JIM_OK;
}

static __device__ int history_cmd_add(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
#if __CUDACC__
    //Jim_HistoryAdd(Jim_String(argv[0]));
#else
    Jim_HistoryAdd(Jim_String(argv[0]));
#endif
    return JIM_OK;
}

static __device__ int history_cmd_show(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
#if __CUDACC__
    //Jim_HistoryShow();
#else
    Jim_HistoryShow();
#endif
    return JIM_OK;
}

__constant__ static const jim_subcmd_type _history_command_table[] = {
    { "getline", "prompt ?varname?", history_cmd_getline, 1, 2 },
    // Description: Reads one line from the user. Similar to gets.
    { "load", "filename", history_cmd_load, 1, 1, },
    // Description: Loads history from the given file, if possible
    { "save", "filename", history_cmd_save, 1, 1 },
    // Description: Saves history to the given file
    { "add", "line", history_cmd_add, 1, 1 },
    // Description: Adds the line to the history and saves
    { "show", NULL, history_cmd_show, 0, 0 },
    // Description: Displays the history
    { NULL }
};

static __device__ int JimHistorySubCmdProc(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    return Jim_CallSubCmd(interp, Jim_ParseSubCmd(interp, _history_command_table, argc, argv), argc, argv);
}

static __device__ void JimHistoryDelProc(ClientData privData, Jim_Interp *interp)
{
    Jim_Free(privData);
}

__device__ int Jim_historyInit(Jim_Interp *interp)
{
    if (Jim_PackageProvide(interp, "history", "1.0", JIM_ERRMSG))
        return JIM_ERROR;
    void **history = (void **)Jim_Alloc(sizeof(*history));
    *history = NULL;
    Jim_CreateCommand(interp, "history", JimHistorySubCmdProc, history, JimHistoryDelProc);
    return JIM_OK;
}
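// A minimal usage sketch (hypothetical: it assumes this port keeps the
// standard Jim Tcl entry points Jim_CreateInterp/Jim_Eval/Jim_FreeInterp
// with their usual signatures):
static __device__ void history_usage_example()
{
    Jim_Interp *interp = Jim_CreateInterp();
    Jim_historyInit(interp);                      // registers the "history" command
    Jim_Eval(interp, "history add {make test}");  // dispatched through _history_command_table
    Jim_Eval(interp, "history show");
    Jim_FreeInterp(interp);
}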
6e3f474c49111be6ab9182d21897066799076ee5.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright 1993-2010 NVIDIA Corporation.  All rights reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation.
 * Any use, reproduction, disclosure, or distribution of this software
 * and related documentation without an express license agreement from
 * NVIDIA Corporation is strictly prohibited.
 *
 * Please refer to the applicable NVIDIA end user license agreement (EULA)
 * associated with this source code for terms and conditions that govern
 * your use of this NVIDIA software.
 *
 */

#include "../common/book.h"

#define imin(a,b) (a<b?a:b)

const int N = 33 * 1024;
const int threadsPerBlock = 256;
const int blocksPerGrid = imin( 32, (N+threadsPerBlock-1) / threadsPerBlock );

__global__ void dot( float *a, float *b, float *c ) {
    __shared__ float cache[threadsPerBlock];
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int cacheIndex = threadIdx.x;

    float temp = 0;
    while (tid < N) {
        temp += a[tid] * b[tid];
        tid += blockDim.x * gridDim.x;
    }

    // set the cache values
    cache[cacheIndex] = temp;

    // synchronize threads in this block
    __syncthreads();

    // for reductions, threadsPerBlock must be a power of 2
    // because of the following code
    int i = blockDim.x/2;
    while (i != 0) {
        if (cacheIndex < i)
            cache[cacheIndex] += cache[cacheIndex + i];
        __syncthreads();
        i /= 2;
    }

    if (cacheIndex == 0)
        c[blockIdx.x] = cache[0];
}

int main( void ) {
    float *a, *b, c, *partial_c;
    float *dev_a, *dev_b, *dev_partial_c;

    // allocate memory on the cpu side
    a = (float*)malloc( N*sizeof(float) );
    b = (float*)malloc( N*sizeof(float) );
    partial_c = (float*)malloc( blocksPerGrid*sizeof(float) );

    // allocate the memory on the GPU
    HANDLE_ERROR( hipMalloc( (void**)&dev_a, N*sizeof(float) ) );
    HANDLE_ERROR( hipMalloc( (void**)&dev_b, N*sizeof(float) ) );
    HANDLE_ERROR( hipMalloc( (void**)&dev_partial_c, blocksPerGrid*sizeof(float) ) );

    // fill in the host memory with data
    for (int i=0; i<N; i++) {
        a[i] = i;
        b[i] = i*2;
    }

    // copy the arrays 'a' and 'b' to the GPU
    HANDLE_ERROR( hipMemcpy( dev_a, a, N*sizeof(float), hipMemcpyHostToDevice ) );
    HANDLE_ERROR( hipMemcpy( dev_b, b, N*sizeof(float), hipMemcpyHostToDevice ) );

    hipLaunchKernelGGL(( dot), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, dev_a, dev_b, dev_partial_c );

    // copy the array 'c' back from the GPU to the CPU
    HANDLE_ERROR( hipMemcpy( partial_c, dev_partial_c, blocksPerGrid*sizeof(float), hipMemcpyDeviceToHost ) );

    // finish up on the CPU side
    c = 0;
    for (int i=0; i<blocksPerGrid; i++) {
        c += partial_c[i];
    }

#define sum_squares(x)  (x*(x+1)*(2*x+1)/6)
    printf( "Does GPU value %.6g = %.6g?\n", c, 2 * sum_squares( (float)(N - 1) ) );

    // free memory on the gpu side
    HANDLE_ERROR( hipFree( dev_a ) );
    HANDLE_ERROR( hipFree( dev_b ) );
    HANDLE_ERROR( hipFree( dev_partial_c ) );

    // free memory on the cpu side
    free( a );
    free( b );
    free( partial_c );
}

/* INSTRUCTOR'S VERSION */

#include <stdio.h>

#define N 1024*1024
#define T 128

__global__ void dotproduct(float *a, float *b, float *result)
{
    int i = threadIdx.x;
    int j = blockIdx.x*T + i;
    __shared__ float temp[T];

    temp[i] = a[j] * b[j];
    __syncthreads();

    if (threadIdx.x == 0)
    {
        float sum = 0.0;
        for (i=0; i < T; i++)
            sum += temp[i];
        atomicAdd(result, sum);
    }
}

int main()
{
    float *a = (float *) malloc(N*sizeof(float));
    float *b = (float *) malloc(N*sizeof(float));
    float c = 0.0;
    int i;

    for (i=0; i < N; i++)
        a[i] = b[i] = 1.0;

    float *d_a, *d_b, *d_c;
    hipMalloc((void **) &d_a, N*sizeof(float));
    hipMalloc((void **) &d_b, N*sizeof(float));
    hipMalloc((void **) &d_c, sizeof(float));

    hipMemcpy(d_a, a, N*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, N*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_c, &c, sizeof(float), hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( dotproduct), dim3(N/T), dim3(T), 0, 0, d_a, d_b, d_c);

    // copy the accumulated result back before printing it
    hipMemcpy(&c, d_c, sizeof(float), hipMemcpyDeviceToHost);

    printf("c = %f\n", c);
    exit(0);
}
6e3f474c49111be6ab9182d21897066799076ee5.cu
/*
 * Copyright 1993-2010 NVIDIA Corporation.  All rights reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation.
 * Any use, reproduction, disclosure, or distribution of this software
 * and related documentation without an express license agreement from
 * NVIDIA Corporation is strictly prohibited.
 *
 * Please refer to the applicable NVIDIA end user license agreement (EULA)
 * associated with this source code for terms and conditions that govern
 * your use of this NVIDIA software.
 *
 */

#include "../common/book.h"

#define imin(a,b) (a<b?a:b)

const int N = 33 * 1024;
const int threadsPerBlock = 256;
const int blocksPerGrid = imin( 32, (N+threadsPerBlock-1) / threadsPerBlock );

__global__ void dot( float *a, float *b, float *c ) {
    __shared__ float cache[threadsPerBlock];
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int cacheIndex = threadIdx.x;

    float temp = 0;
    while (tid < N) {
        temp += a[tid] * b[tid];
        tid += blockDim.x * gridDim.x;
    }

    // set the cache values
    cache[cacheIndex] = temp;

    // synchronize threads in this block
    __syncthreads();

    // for reductions, threadsPerBlock must be a power of 2
    // because of the following code
    int i = blockDim.x/2;
    while (i != 0) {
        if (cacheIndex < i)
            cache[cacheIndex] += cache[cacheIndex + i];
        __syncthreads();
        i /= 2;
    }

    if (cacheIndex == 0)
        c[blockIdx.x] = cache[0];
}

int main( void ) {
    float *a, *b, c, *partial_c;
    float *dev_a, *dev_b, *dev_partial_c;

    // allocate memory on the cpu side
    a = (float*)malloc( N*sizeof(float) );
    b = (float*)malloc( N*sizeof(float) );
    partial_c = (float*)malloc( blocksPerGrid*sizeof(float) );

    // allocate the memory on the GPU
    HANDLE_ERROR( cudaMalloc( (void**)&dev_a, N*sizeof(float) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_b, N*sizeof(float) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_partial_c, blocksPerGrid*sizeof(float) ) );

    // fill in the host memory with data
    for (int i=0; i<N; i++) {
        a[i] = i;
        b[i] = i*2;
    }

    // copy the arrays 'a' and 'b' to the GPU
    HANDLE_ERROR( cudaMemcpy( dev_a, a, N*sizeof(float), cudaMemcpyHostToDevice ) );
    HANDLE_ERROR( cudaMemcpy( dev_b, b, N*sizeof(float), cudaMemcpyHostToDevice ) );

    dot<<<blocksPerGrid,threadsPerBlock>>>( dev_a, dev_b, dev_partial_c );

    // copy the array 'c' back from the GPU to the CPU
    HANDLE_ERROR( cudaMemcpy( partial_c, dev_partial_c, blocksPerGrid*sizeof(float), cudaMemcpyDeviceToHost ) );

    // finish up on the CPU side
    c = 0;
    for (int i=0; i<blocksPerGrid; i++) {
        c += partial_c[i];
    }

#define sum_squares(x)  (x*(x+1)*(2*x+1)/6)
    printf( "Does GPU value %.6g = %.6g?\n", c, 2 * sum_squares( (float)(N - 1) ) );

    // free memory on the gpu side
    HANDLE_ERROR( cudaFree( dev_a ) );
    HANDLE_ERROR( cudaFree( dev_b ) );
    HANDLE_ERROR( cudaFree( dev_partial_c ) );

    // free memory on the cpu side
    free( a );
    free( b );
    free( partial_c );
}

/* INSTRUCTOR'S VERSION */

#include <stdio.h>

#define N 1024*1024
#define T 128

__global__ void dotproduct(float *a, float *b, float *result)
{
    int i = threadIdx.x;
    int j = blockIdx.x*T + i;
    __shared__ float temp[T];

    temp[i] = a[j] * b[j];
    __syncthreads();

    if (threadIdx.x == 0)
    {
        float sum = 0.0;
        for (i=0; i < T; i++)
            sum += temp[i];
        atomicAdd(result, sum);
    }
}

int main()
{
    float *a = (float *) malloc(N*sizeof(float));
    float *b = (float *) malloc(N*sizeof(float));
    float c = 0.0;
    int i;

    for (i=0; i < N; i++)
        a[i] = b[i] = 1.0;

    float *d_a, *d_b, *d_c;
    cudaMalloc((void **) &d_a, N*sizeof(float));
    cudaMalloc((void **) &d_b, N*sizeof(float));
    cudaMalloc((void **) &d_c, sizeof(float));

    cudaMemcpy(d_a, a, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_c, &c, sizeof(float), cudaMemcpyHostToDevice);

    dotproduct<<<N/T, T>>>(d_a, d_b, d_c);

    // copy the accumulated result back before printing it
    cudaMemcpy(&c, d_c, sizeof(float), cudaMemcpyDeviceToHost);

    printf("c = %f\n", c);
    exit(0);
}
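// A minimal sketch of an alternative reduction that drops the power-of-2
// restriction noted above by summing within warps via __shfl_down_sync
// (CUDA 9+). dotproduct_shfl is a hypothetical variant, not part of either
// program above; it assumes blockDim.x is a multiple of warpSize.
__global__ void dotproduct_shfl(float *a, float *b, float *result)
{
    float temp = 0.0f;
    // grid-stride partial sum, as in the book version
    for (int tid = threadIdx.x + blockIdx.x * blockDim.x; tid < N; tid += blockDim.x * gridDim.x)
        temp += a[tid] * b[tid];
    // warp-level tree sum: each step halves the number of live partial sums
    for (int offset = warpSize / 2; offset > 0; offset /= 2)
        temp += __shfl_down_sync(0xffffffff, temp, offset);
    // lane 0 of each warp holds that warp's total
    if ((threadIdx.x & (warpSize - 1)) == 0)
        atomicAdd(result, temp);
}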
9bca041eb486d82ed1c8d21fe32efa3be06d57ff.hip
// !!! This is a file automatically generated by hipify!!! // ** Original codelet code ** // // #pragma hmppcg cpiparam __arg0 INOUT e%hmpp_codelet__threeMMloopa: (1, 2) e%hmpp_codelet__threeMMloopc: (3, 0) // #pragma hmppcg cpiparam __arg1 INOUT f%hmpp_codelet__threeMMloopb: (2, 2) f%hmpp_codelet__threeMMloopc: (3, 1) // #pragma hmppcg cpiparam __arg2 INOUT a%hmpp_codelet__threeMMloopa: (1, 0) // #pragma hmppcg cpiparam __arg3 INOUT b%hmpp_codelet__threeMMloopa: (1, 1) // #pragma hmppcg cpiparam __arg4 INOUT c%hmpp_codelet__threeMMloopb: (2, 0) // #pragma hmppcg cpiparam __arg5 INOUT d%hmpp_codelet__threeMMloopb: (2, 1) // #pragma hmppcg cpiparam __arg6 INOUT g%hmpp_codelet__threeMMloopc: (3, 2) // // #pragma hmppcg cpicall hmpp_codelet__threeMMloopa(__arg2, __arg3, __arg0): 1 // #pragma hmppcg cpicall hmpp_codelet__threeMMloopb(__arg4, __arg5, __arg1): 2 // #pragma hmppcg cpicall hmpp_codelet__threeMMloopc(__arg0, __arg1, __arg6): 3 // // // /* begin of extracted source code for directive set "group1" */ // // // # 32 "threemm.c" // typedef float DATA_TYPE; // // // # 42 "threemm.c" // void hmpp_codelet__threeMMloopa(DATA_TYPE a[512][512], DATA_TYPE b[512][512], DATA_TYPE e[512][512]) // { // int i, j, k; // // // #pragma hmppcg grid blocksize 32 X 8 // # 10 "<preprocessor>" // # 49 "threemm.c" // #pragma hmppcg parallel // # 13 "<preprocessor>" // # 50 "threemm.c" // for (i = 0 ; i < 512 ; i++) // { // #pragma hmppcg parallel // # 18 "<preprocessor>" // # 53 "threemm.c" // for (j = 0 ; j < 512 ; j++) // { // e[i][j] = 0; // // #pragma hmppcg noParallel // # 25 "<preprocessor>" // # 58 "threemm.c" // for (k = 0 ; k < 512 ; ++k) // { // e[i][j] += a[i][k] * b[k][j]; // } // } // } // } // // // # 67 "threemm.c" // void hmpp_codelet__threeMMloopb(DATA_TYPE c[512][512], DATA_TYPE d[512][512], DATA_TYPE f[512][512]) // { // int i, j, k; // // // #pragma hmppcg grid blocksize 32 X 8 // # 10 "<preprocessor>" // # 74 "threemm.c" // #pragma hmppcg parallel // # 13 "<preprocessor>" // # 75 "threemm.c" // for (i = 0 ; i < 512 ; i++) // { // #pragma hmppcg parallel // # 18 "<preprocessor>" // # 78 "threemm.c" // for (j = 0 ; j < 512 ; j++) // { // f[i][j] = 0; // // #pragma hmppcg noParallel // # 25 "<preprocessor>" // # 83 "threemm.c" // for (k = 0 ; k < 512 ; ++k) // { // f[i][j] += c[i][k] * d[k][j]; // } // } // } // } // // // # 92 "threemm.c" // void hmpp_codelet__threeMMloopc(DATA_TYPE e[512][512], DATA_TYPE f[512][512], DATA_TYPE g[512][512]) // { // int i, j, k; // // // #pragma hmppcg grid blocksize 32 X 8 // # 10 "<preprocessor>" // # 99 "threemm.c" // #pragma hmppcg parallel // # 13 "<preprocessor>" // # 100 "threemm.c" // for (i = 0 ; i < 512 ; i++) // { // // #pragma hmppcg parallel // # 19 "<preprocessor>" // # 104 "threemm.c" // for (j = 0 ; j < 512 ; j++) // { // g[i][j] = 0; // // #pragma hmppcg noParallel // # 26 "<preprocessor>" // # 109 "threemm.c" // for (k = 0 ; k < 512 ; ++k) // { // g[i][j] += e[i][k] * f[k][j]; // } // } // } // } // // // /* end of extracted source code for directive set "group1" */ // // // // ** End of original codelet codelet ** #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #ifdef _MSC_VER # define HMPPCG_RESTRICT typedef __int8 int8_t; typedef unsigned __int8 uint8_t; typedef __int16 int16_t; typedef unsigned __int16 uint16_t; typedef __int32 int32_t; typedef unsigned __int32 uint32_t; typedef __int64 int64_t; typedef unsigned __int64 uint64_t; # ifdef _WIN64 typedef int64_t intptr_t; # else typedef int32_t intptr_t; # 
endif #else # if defined(__GNUC__) || defined(__RESTRICT) # define HMPPCG_RESTRICT __restrict # else # define HMPPCG_RESTRICT # endif # include <stdint.h> #endif // Dynamic array typedef struct __hmppcg_array_struct { void *array; size_t *size; size_t elsize; } __hmppcg_array_t; // Data section typedef struct __hmppcg_DataSection { size_t from; size_t to; size_t step; } __hmppcg_DataSection; #include <hip/hip_runtime.h> #if CUDART_VERSION < 2000 #error Bad CUDA Runtime version. CUDA Toolkit 2.0+ required. #endif #define HMPP_CONSTMEM_OFFSET 0 #include <map> #include <string> // ---------------------------------------------------------------------------- // HMPP CUDA support classes // ---------------------------------------------------------------------------- #ifndef __HMPP_CUDADATA_H__ #define __HMPP_CUDADATA_H__ #ifndef HMPPCG_WARP_SIZE #define HMPPCG_WARP_SIZE 32 #endif enum CopyKind { HostToHost = 0, HostToDevice = 1, DeviceToHost = 2, DeviceToDevice = 3, }; inline int hmppcg_check_status(const char *file,int line,hipError_t status) { if(status != hipSuccess) { fprintf(stderr, "%s:%d CUDA Error: %s\n", file, line, hipGetErrorString(status)); return -1; } return 0; } #define CHECK_STATUS(X) hmppcg_check_status(__FILE__,__LINE__,(X)) #define HMPP_CHECK_GRID_BOUNDARY(x) \ if(x>65535){\ fprintf(stderr, "%s:%d Grid Dimension Error: '%s' exceeds the 65535 dimension limit. Please modify the grid size configuration (see the hmppcg grid blocksize pragma) or switch to 2D gridification\n", __FILE__,__LINE__, #x);\ exit(-1) ;\ } #define HMPP_CHECK_BLOCK_BOUNDARY(x) \ if(x > devProp.maxThreadsPerBlock){ \ fprintf(stderr, "%s:%d Number of threads per block exceeds for the HWA: it is '%d' and HWA supports up to '%d'. Please modify the block size configuration (see the hmppcg grid blocksize pragma)\n", __FILE__,__LINE__, x, devProp.maxThreadsPerBlock); \ exit(-1) ; \ } // ---------------------------------------------------------------------------- // class DefaultPolicy // ---------------------------------------------------------------------------- struct DefaultPolicy { public: DefaultPolicy() { } virtual ~DefaultPolicy() { } int deviceAlloc(void **ptr,size_t size) { if( CHECK_STATUS(hipStreamCreate(&stream_)) != 0 ) return -1; if( CHECK_STATUS(hipMalloc(ptr,size)) != 0 ) return -1; #if TORCH_HIP_VERSION >= 3020 if( CHECK_STATUS(hipEventCreateWithFlags(&event, hipEventDisableTiming | hipEventBlockingSync)) != 0) return -1; #else if( CHECK_STATUS(hipEventCreateWithFlags(&event, hipEventBlockingSync)) != 0) return -1; #endif return 0; } int deviceFree(void *ptr) { if( CHECK_STATUS(hipStreamDestroy(stream_)) != 0) return -1; if( CHECK_STATUS(hipFree(ptr)) != 0) return -1; if( CHECK_STATUS(hipEventDestroy(event)) != 0) return -1; return 0; } int deviceMemcpy(void *dst,const void *src,size_t size,CopyKind kind,bool async) { static hipMemcpyKind cudaKind[] = {hipMemcpyHostToHost, hipMemcpyHostToDevice, hipMemcpyDeviceToHost, hipMemcpyDeviceToDevice }; if(async) { return CHECK_STATUS(hipMemcpyAsync(dst,src,size,cudaKind[kind],stream_)); } else { return CHECK_STATUS(hipMemcpy(dst,src,size,cudaKind[kind])); } } int makeStreamWait(hipStream_t wstream) { int status; status = CHECK_STATUS(hipEventRecord(event, stream_)); if (status != 0) return status; #if TORCH_HIP_VERSION >= 3020 return CHECK_STATUS(hipStreamWaitEvent(wstream, event, 0)); #else return CHECK_STATUS(hipEventSynchronize(event)); #endif } int waitOnEvent(hipEvent_t wevent) { #if TORCH_HIP_VERSION >= 3020 return 
CHECK_STATUS(hipStreamWaitEvent(stream_, wevent, 0)); #else return CHECK_STATUS(hipEventSynchronize(wevent)); #endif } int deviceWait() { return CHECK_STATUS(hipStreamSynchronize(stream_)); } private: hipStream_t stream_; hipEvent_t event; }; // ---------------------------------------------------------------------------- // class ConstantPolicy // ---------------------------------------------------------------------------- #ifndef HMPP_CONSTMEM_SIZE #define HMPP_CONSTMEM_SIZE 2048 #endif __constant__ int64_t hmpp_constmem[HMPP_CONSTMEM_SIZE / 8]; /// Shared memory array is aligned on 64 bit thanks to that (to avoid an nvcc compilation error) extern __shared__ int64_t hmpp_sharedmem[]; struct ConstantPolicy { public: ConstantPolicy() { static bool initialized = false; if(!initialized) { next_offset_ = HMPP_CONSTMEM_OFFSET; initialized = true; } offset_ = -1; } virtual ~ConstantPolicy() { } void setStaticOffset(int offset) { offset_ = offset; while(offset_ % 8) offset_ ++; } int deviceAlloc(void **ptr, size_t size) { #if TORCH_HIP_VERSION >= 3020 if( CHECK_STATUS(hipEventCreateWithFlags(&event, hipEventDisableTiming | hipEventBlockingSync)) != 0) return -1; #else if( CHECK_STATUS(hipEventCreateWithFlags(&event, hipEventBlockingSync)) != 0) return -1; #endif if(offset_ != -1) { if((offset_ + size) >= HMPP_CONSTMEM_SIZE) return -1; (*ptr) = (void *)offset_; return 0; } if((next_offset_ + size) >= HMPP_CONSTMEM_SIZE) return -1; (*ptr) = (void *)next_offset_; next_offset_ += size; return 0; } int deviceFree(void *ptr) { return 0; } int deviceMemcpy(void *dst,const void *src,size_t size,CopyKind kind,bool async) { size_t offset; switch(kind) { case HostToDevice: offset = (size_t)dst; return CHECK_STATUS(hipMemcpyToSymbol(hmpp_constmem,src,size,offset,hipMemcpyHostToDevice)); case DeviceToHost: offset = (size_t)src; return CHECK_STATUS(hipMemcpyFromSymbol(dst,hmpp_constmem,size,offset,hipMemcpyDeviceToHost)); default: return -1; } } int makeStreamWait(hipStream_t wstream) { int status; /* stream 0 at the moment */ status = CHECK_STATUS(hipEventRecord(event, 0)); if (status != 0) return status; #if TORCH_HIP_VERSION >= 3020 return CHECK_STATUS(hipStreamWaitEvent(wstream, event, 0)); #else return CHECK_STATUS(hipEventSynchronize(event)); #endif } int waitOnEvent(hipEvent_t wevent) { /* stream 0 at the moment */ #if TORCH_HIP_VERSION >= 3020 return CHECK_STATUS(hipStreamWaitEvent(0, wevent, 0)); #else return CHECK_STATUS(hipEventSynchronize(wevent)); #endif } int deviceWait() { return 0; } private: static size_t next_offset_; int offset_; hipEvent_t event; }; size_t ConstantPolicy::next_offset_; // ---------------------------------------------------------------------------- // class Lazy // ---------------------------------------------------------------------------- template <typename Policy> struct Lazy { char * value; bool valid; bool allocated; void ** devaddr; Policy * policy; size_t size; Lazy(size_t elem_size) { value = new char[elem_size]; } ~Lazy() { delete[] value; } int requireDeviceAlloc() { if(!allocated) { allocated = true; return policy->deviceAlloc(devaddr,size); } else { return 0; } } }; // ---------------------------------------------------------------------------- // class Element // ---------------------------------------------------------------------------- template <typename T,typename Policy> struct Element { Element(void * const * device_addr, size_t offset, Policy *policy, Lazy<Policy> * lazy) : device_addr_(device_addr) , offset_(offset), policy_(policy), lazy_(lazy) { } 
Element &operator=(const T & value) { if(lazy_) { *((T *)(lazy_->value)) = value; lazy_->valid = true; return *this; } if(lazy_) lazy_->requireDeviceAlloc(); policy_->deviceMemcpy(((char*)(*device_addr_)) + offset_,(const char*)&value,ElemSize,HostToDevice,false); return *this; } Element &operator=(const Element & src) { if(src.lazy_ && src.lazy_->valid) { lazy_->valid = true; *((T *)(lazy_->value)) = *((T *)(src.lazy_->value)); return *this; } if(lazy_) lazy_->requireDeviceAlloc(); if(src.lazy_) src.lazy_->requireDeviceAlloc(); policy_->deviceMemcpy(((char*)(*device_addr_)) + offset_,((const char*)(*src.device_addr_)) + src.offset_, ElemSize,DeviceToDevice,false); if(lazy_) { lazy_->valid = false; } return *this; } operator T() { if(lazy_ && lazy_->valid) return *((T *)(lazy_->value)); T res; if(lazy_) lazy_->requireDeviceAlloc(); policy_->deviceMemcpy(&res,((const char*)(*device_addr_)) + offset_,ElemSize,DeviceToHost,false); if(lazy_) { *((T *)(lazy_->value)) = res; lazy_->valid = true; } return res; } typedef T Type; enum { ElemSize = sizeof(T) }; private: size_t offset_; void *const* device_addr_; Policy *policy_; public: Lazy<Policy> * lazy_; }; enum DataFlags { DEFAULT = 0x0, LAZY = 0x1 }; // ---------------------------------------------------------------------------- // class Data // ---------------------------------------------------------------------------- template <typename T,typename Policy> class Data { public: typedef T Type; typedef Element<T,Policy> ElementType; enum { ElemSize = sizeof(T) }; Data(const char * name, unsigned int flags = DEFAULT) : name_(name), flags_(flags), dim_(0), sizes_(0), size_(0), host_addr_(0), device_addr_(0) { policy_ = new Policy; if(flags_ & LAZY) { lazy_ = new Lazy<Policy>(ElemSize); lazy_->valid = false; lazy_->devaddr = 0; lazy_->policy = policy_; } else lazy_ = 0; } ~Data() { free(); delete policy_; if(lazy_) delete lazy_; } int allocate(unsigned int dim, size_t idx0 = 0, size_t idx1 = 0, size_t idx2 = 0, size_t idx3 = 0, size_t idx4 = 0, size_t idx5 = 0, size_t idx6 = 0, size_t idx7 = 0, size_t idx8 = 0, size_t idx9 = 0, size_t idxA = 0, size_t idxB = 0) { const size_t sizes[] = { idx0, idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9, idxA, idxB }; return allocate2(dim,sizes); } int allocate3(unsigned int dim_p, const size_t * sizes_p) { size_t sizes[2]; sizes[0] = 1; sizes[1] = 0; for(int d = 0 ; d < dim_p ; d++) { sizes[0] *= sizes_p[d]; } return allocate2(1, sizes); } int allocate2(unsigned int dim, const size_t * sizes) { dim_ = dim; sizes_ = new size_t[dim]; dimSizes_ = new size_t[dim]; size_ = ElemSize; for(int d=0;d<dim;d++) { sizes_[d] = sizes[d]; size_ *= sizes_[d]; size_t size = 1; for(int d2=d+1;d2<dim;d2++) size*=sizes[d2]; dimSizes_[d] = size; } if(lazy_) { lazy_->allocated = false; lazy_->devaddr = &device_addr_; lazy_->size = size_; return 0; } else return policy_->deviceAlloc(&device_addr_,size_); } int free() { if(sizes_) { delete [] sizes_; delete [] dimSizes_; sizes_ = 0; dim_ = 0; size_ = 0; } if(device_addr_) { if(policy_->deviceFree(device_addr_) != 0) return -1; device_addr_ = 0; } return 0; } int download(void * host_addr,bool async) { if(lazy_ && lazy_->valid) { *((T *)host_addr) = *((T *)(lazy_->value)); return 0; } if(lazy_) { lazy_->requireDeviceAlloc(); } int sts = policy_->deviceMemcpy(host_addr,device_addr_,size_,DeviceToHost,async); if(lazy_) { lazy_->valid = true; *((T *)(lazy_->value)) = *((T *)host_addr); } return sts; } int upload(const void * host_addr,bool async) { if(lazy_) { lazy_->valid = true; 
*((T *)(lazy_->value)) = * ((T *)host_addr); lazy_->requireDeviceAlloc(); } return policy_->deviceMemcpy(device_addr_,host_addr,size_,HostToDevice,async); } int downloadSection(void *host_addr,const __hmppcg_DataSection *sections,bool async) { return sectionCopy(host_addr,device_addr_,sections,DeviceToHost,async); } int uploadSection(const void *host_addr,const __hmppcg_DataSection *sections,bool async) { return sectionCopy(device_addr_,host_addr,sections,HostToDevice,async); } int makeStreamWait(hipStream_t wstream) { if(lazy_) lazy_->requireDeviceAlloc(); return policy_->makeStreamWait(wstream); } int waitOnEvent(hipEvent_t wevent) { return policy_->waitOnEvent(wevent); } int waitTransfer() { return policy_->deviceWait(); } ElementType operator()(size_t idx0 = 0, size_t idx1 = 0, size_t idx2 = 0, size_t idx3 = 0, size_t idx4 = 0, size_t idx5 = 0, size_t idx6 = 0, size_t idx7 = 0, size_t idx8 = 0, size_t idx9 = 0, size_t idxA = 0, size_t idxB = 0) { size_t sizes[] = { idx0, idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9, idxA, idxB }; return at(sizes); } ElementType at(size_t *idx) { size_t offset = idx[0]; return ElementType(&device_addr_,offset*ElemSize,policy_,lazy_); } template <typename Y> Element<Y,Policy> at(size_t offset) { return Element<Y,Policy>(&device_addr_,offset,policy_,lazy_); } ElementType operator=(const T & value) { ElementType res(&device_addr_,0,policy_,lazy_); res = value; return res; } ElementType operator=(const Data &data) { return operator=(data.value()); } T value() const { ElementType res(&device_addr_,0,policy_,lazy_); return (T)res; } operator T() { return value(); } T *getDeviceAddr() { if(lazy_) lazy_->requireDeviceAlloc(); if(lazy_ && lazy_->valid) { policy_->deviceMemcpy(device_addr_,lazy_->value,size_,HostToDevice,false); } return (T*)device_addr_; } void invalidateLazy() { if(lazy_) { lazy_->valid = false; } } private: Data(const Data &data) {} int sectionCopy(char *dst,const char *src,int offset,int cur, const __hmppcg_DataSection *sections,int lastdense,CopyKind kind,bool async) { int d; int size = 1; for(d=cur+1;d<dim_;d++) size *= sizes_[d]; if(cur<(lastdense-1)) { int x; for(x=sections[cur].from;x<=sections[cur].to;x+=sections[cur].step) if(sectionCopy(dst,src,offset+x*size,cur+1,sections,lastdense,kind,async) != 0) return -1; } else { int step = sections[cur].step; if(step == 1) { int start = (offset + sections[cur].from * size) * ElemSize; int total = (sections[cur].to - sections[cur].from + 1) * size * ElemSize; return policy_->deviceMemcpy(dst+start,src+start,total,kind,async); } else { int x; for(x=sections[cur].from;x<=sections[cur].to;x+=step) { int off = (offset + x * size) * ElemSize; if(policy_->deviceMemcpy(dst+off,src+off,size * ElemSize,kind,async) != 0) return -1; } } } return 0; } int sectionCopy(void *dst,const void *src, const __hmppcg_DataSection *sections,CopyKind kind,bool async) { int i; int lastdense = dim_; for (i = dim_ - 1 ; i >= 0 ; i --) { if ((sections[i].from == 0) && (sections[i].to == sizes_[i] - 1) && (sections[i].step == 1)) lastdense = i; else break; } return sectionCopy((char*)dst,(const char*)src,0,0,sections,lastdense,kind,async); } const char * name_; size_t flags_; void *device_addr_; void *host_addr_; size_t dim_; size_t *sizes_; size_t *dimSizes_; size_t size_; Lazy<Policy> * lazy_; public: Policy *policy_; }; // --------------------------------------------------------------------------- // User data // --------------------------------------------------------------------------- class UserData{ public: 
virtual ~UserData(){} UserData(){} }; #define __HMPPCG_COMPLEX_FLOAT_DEFINED typedef float2 __hmppcg_complex_float; #define __HMPPCG_COMPLEX_DOUBLE_DEFINED typedef double2 __hmppcg_complex_double; // --------------------------------------------------------------------------- // Allocatable Arrays // --------------------------------------------------------------------------- template <const size_t nb_dims> struct AArrayDesc { int lbounds_[nb_dims]; size_t sizes_[nb_dims]; size_t wholesize_; }; #ifndef __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATE #define __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATE( var, type, nb_dims, ... ) \ { int alloc_ranges[] = { __VA_ARGS__ }; \ int hmppcg_alloc_i; \ var ## _aarray_desc.wholesize_ = 1; \ for(hmppcg_alloc_i=0; hmppcg_alloc_i<nb_dims; hmppcg_alloc_i++){ \ int hmppcg_alloc_first = alloc_ranges[2*hmppcg_alloc_i]; \ int hmppcg_alloc_last = alloc_ranges[2*hmppcg_alloc_i + 1]; \ int hmppcg_alloc_size = hmppcg_alloc_last - hmppcg_alloc_first + 1; \ var ## _aarray_desc.lbounds_[hmppcg_alloc_i] = hmppcg_alloc_first; \ var ## _aarray_desc.sizes_[hmppcg_alloc_i] = hmppcg_alloc_size; \ var ## _aarray_desc.wholesize_ *= hmppcg_alloc_size; \ } \ if((hmppcg_status_ = var.allocate2(nb_dims, var ## _aarray_desc.sizes_))) \ return; \ } #endif #ifndef __HMPPCG_ALLOCATABLE_ARRAY_DEALLOCATE #define __HMPPCG_ALLOCATABLE_ARRAY_DEALLOCATE( var ) \ { \ var.free(); \ } #endif #ifndef __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATED #define __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATED( var ) \ (var.getDeviceAddr() != NULL) #endif //__HMPPCG_ALLOCATABLE_ARRAY_ALLOCATED #ifndef __HMPPCG_ALLOCATABLE_ARRAY_WHOLESIZE #define __HMPPCG_ALLOCATABLE_ARRAY_WHOLESIZE( var ) \ var ## _aarray_desc.wholesize_ #endif //__HMPPCG_ALLOCATABLE_ARRAY_WHOLESIZE #ifndef __HMPPCG_ALLOCATABLE_ARRAY_SIZE #define __HMPPCG_ALLOCATABLE_ARRAY_SIZE( var, d ) \ var ## _aarray_desc.sizes_[d] #endif //__HMPPCG_ALLOCATABLE_ARRAY_SIZE #ifndef __HMPPCG_ALLOCATABLE_ARRAY_LBOUND #define __HMPPCG_ALLOCATABLE_ARRAY_LBOUND( var, d ) \ var ## _aarray_desc.lbounds_[d] #endif //__HMPPCG_ALLOCATABLE_ARRAY_LBOUND #ifndef __HMPPCG_ALLOCATABLE_ARRAY_UBOUND #define __HMPPCG_ALLOCATABLE_ARRAY_UBOUND( var, d ) \ (var ## _aarray_desc.sizes_[d] + var ## _aarray_desc.lbounds_[d] - 1) #endif //__HMPPCG_ALLOCATABLE_ARRAY_UBOUND #ifndef __HMPP_INT_POW_FUNC #define __HMPP_INT_POW_FUNC(func_ext_name, func_type) \ __device__ func_type hmpp_pow ##func_ext_name ( func_type base, func_type exp ) \ { \ if(exp < 0) \ return 0; \ func_type result = 1; \ while (exp) \ { \ if (exp & 1) \ result *= base; \ exp >>= 1; \ base *= base; \ } \ return result; \ } #endif __HMPP_INT_POW_FUNC( i64, int64_t ); __HMPP_INT_POW_FUNC( i32, int32_t ); __HMPP_INT_POW_FUNC( i16, int16_t ); __HMPP_INT_POW_FUNC( i8, int8_t ); #ifndef __HMPP_UINT_POW_FUNC #define __HMPP_UINT_POW_FUNC(func_ext_name, func_type) \ __device__ func_type hmpp_pow ##func_ext_name ( func_type base, func_type exp ) \ { \ func_type result = 1; \ while (exp) \ { \ if (exp & 1) \ result *= base; \ exp >>= 1; \ base *= base; \ } \ return result; \ } #endif __HMPP_UINT_POW_FUNC( ui64, uint64_t ); __HMPP_UINT_POW_FUNC( ui32, uint32_t ); __HMPP_UINT_POW_FUNC( ui16, uint16_t ); __HMPP_UINT_POW_FUNC( ui8, uint8_t ); #endif // __HMPP_CUDADATA_H__ #ifndef __HMPPCG_COMPLEX_DOUBLE_DEFINED #define __HMPPCG_COMPLEX_DOUBLE_DEFINED typedef struct { double x; double y; }__hmppcg_complex_double; #endif /* __HMPPCG_COMPLEX_DOUBLE_DEFINED */ #ifndef __HMPPCG_COMPLEX_FLOAT_DEFINED #define __HMPPCG_COMPLEX_FLOAT_DEFINED typedef struct { float x; float 
y; }__hmppcg_complex_float; #endif /* __HMPPCG_COMPLEX_FLOAT_DEFINED */ template <const unsigned int blockDimX__, const unsigned int blockDimY__> __global__ void hmpp_codelet__threeMMloopa_loop0_( float * HMPPCG_RESTRICT a, float * HMPPCG_RESTRICT b, float * HMPPCG_RESTRICT e) { int32_t j_3; int32_t i_3; j_3 = (blockDimX__ * blockIdx.x + threadIdx.x); i_3 = (blockDimY__ * blockIdx.y + threadIdx.y); bool __hmppcg_guard = (!((j_3 <= 511) & (i_3 <= 511))); if(__hmppcg_guard) { goto __hmppcg_label1; }; e[(i_3 * 512) + j_3] = 0; { int32_t __hmppcg_end, k_3; for (k_3 = 0, __hmppcg_end = 511; k_3 <= __hmppcg_end; k_3 += 1) { e[(i_3 * 512) + j_3] = (e[(i_3 * 512) + j_3]) + ((a[(i_3 * 512) + k_3]) * (b[(k_3 * 512) + j_3])); } } __hmppcg_label1:; } void hmpp_codelet__threeMMloopa( int &hmppcg_status_, void * __h, const hipDeviceProp_t &devProp, hipStream_t kernel_stream, hipEvent_t kernel_event, Data<float,DefaultPolicy> & a, Data<float,DefaultPolicy> & b, Data<float,DefaultPolicy> & e) { if(1LL) { unsigned int gridDimX__ = 16LL; HMPP_CHECK_GRID_BOUNDARY(gridDimX__); unsigned int gridDimY__ = 64LL; HMPP_CHECK_GRID_BOUNDARY(gridDimY__); dim3 dim_grid(gridDimX__, gridDimY__); const unsigned int blockDimX__ = 32LL; const unsigned int blockDimY__ = 8LL; HMPP_CHECK_BLOCK_BOUNDARY(blockDimX__*blockDimY__); #if TORCH_HIP_VERSION >= 3020 a.makeStreamWait(kernel_stream); b.makeStreamWait(kernel_stream); e.makeStreamWait(kernel_stream); #else if ((hmppcg_status_ = CHECK_STATUS(hipDeviceSynchronize()))) return; #endif dim3 dim_block(blockDimX__, blockDimY__); hipLaunchKernelGGL(( hmpp_codelet__threeMMloopa_loop0_<blockDimX__, blockDimY__>), dim3(dim_grid), dim3(dim_block), 0LL, kernel_stream, a.getDeviceAddr(), b.getDeviceAddr(), e.getDeviceAddr()); if ((hmppcg_status_ = CHECK_STATUS(hipGetLastError()))) return; #if TORCH_HIP_VERSION >= 3020 if((hmppcg_status_ = CHECK_STATUS(hipEventRecord(kernel_event, kernel_stream)))) return; a.waitOnEvent(kernel_event); b.waitOnEvent(kernel_event); e.waitOnEvent(kernel_event); #else if ((hmppcg_status_ = CHECK_STATUS(hipDeviceSynchronize()))) return; #endif }; } template <const unsigned int blockDimX__, const unsigned int blockDimY__> __global__ void hmpp_codelet__threeMMloopb_loop0_( float * HMPPCG_RESTRICT c, float * HMPPCG_RESTRICT d, float * HMPPCG_RESTRICT f) { int32_t j_4; int32_t i_4; j_4 = (blockDimX__ * blockIdx.x + threadIdx.x); i_4 = (blockDimY__ * blockIdx.y + threadIdx.y); bool __hmppcg_guard = (!((j_4 <= 511) & (i_4 <= 511))); if(__hmppcg_guard) { goto __hmppcg_label3; }; f[(i_4 * 512) + j_4] = 0; { int32_t __hmppcg_end, k_4; for (k_4 = 0, __hmppcg_end = 511; k_4 <= __hmppcg_end; k_4 += 1) { f[(i_4 * 512) + j_4] = (f[(i_4 * 512) + j_4]) + ((c[(i_4 * 512) + k_4]) * (d[(k_4 * 512) + j_4])); } } __hmppcg_label3:; } void hmpp_codelet__threeMMloopb( int &hmppcg_status_, void * __h, const hipDeviceProp_t &devProp, hipStream_t kernel_stream, hipEvent_t kernel_event, Data<float,DefaultPolicy> & c, Data<float,DefaultPolicy> & d, Data<float,DefaultPolicy> & f) { if(1LL) { unsigned int gridDimX__ = 16LL; HMPP_CHECK_GRID_BOUNDARY(gridDimX__); unsigned int gridDimY__ = 64LL; HMPP_CHECK_GRID_BOUNDARY(gridDimY__); dim3 dim_grid(gridDimX__, gridDimY__); const unsigned int blockDimX__ = 32LL; const unsigned int blockDimY__ = 8LL; HMPP_CHECK_BLOCK_BOUNDARY(blockDimX__*blockDimY__); #if TORCH_HIP_VERSION >= 3020 c.makeStreamWait(kernel_stream); d.makeStreamWait(kernel_stream); f.makeStreamWait(kernel_stream); #else if ((hmppcg_status_ = CHECK_STATUS(hipDeviceSynchronize()))) 
return; #endif dim3 dim_block(blockDimX__, blockDimY__); hipLaunchKernelGGL(( hmpp_codelet__threeMMloopb_loop0_<blockDimX__, blockDimY__>), dim3(dim_grid), dim3(dim_block), 0LL, kernel_stream, c.getDeviceAddr(), d.getDeviceAddr(), f.getDeviceAddr()); if ((hmppcg_status_ = CHECK_STATUS(hipGetLastError()))) return; #if TORCH_HIP_VERSION >= 3020 if((hmppcg_status_ = CHECK_STATUS(hipEventRecord(kernel_event, kernel_stream)))) return; c.waitOnEvent(kernel_event); d.waitOnEvent(kernel_event); f.waitOnEvent(kernel_event); #else if ((hmppcg_status_ = CHECK_STATUS(hipDeviceSynchronize()))) return; #endif }; } template <const unsigned int blockDimX__, const unsigned int blockDimY__> __global__ void hmpp_codelet__threeMMloopc_loop0_( float * HMPPCG_RESTRICT e_11, float * HMPPCG_RESTRICT f_11, float * HMPPCG_RESTRICT g) { int32_t j_5; int32_t i_5; j_5 = (blockDimX__ * blockIdx.x + threadIdx.x); i_5 = (blockDimY__ * blockIdx.y + threadIdx.y); bool __hmppcg_guard = (!((j_5 <= 511) & (i_5 <= 511))); if(__hmppcg_guard) { goto __hmppcg_label5; }; g[(i_5 * 512) + j_5] = 0; { int32_t __hmppcg_end, k_5; for (k_5 = 0, __hmppcg_end = 511; k_5 <= __hmppcg_end; k_5 += 1) { g[(i_5 * 512) + j_5] = (g[(i_5 * 512) + j_5]) + ((e_11[(i_5 * 512) + k_5]) * (f_11[(k_5 * 512) + j_5])); } } __hmppcg_label5:; } void hmpp_codelet__threeMMloopc( int &hmppcg_status_, void * __h, const hipDeviceProp_t &devProp, hipStream_t kernel_stream, hipEvent_t kernel_event, Data<float,DefaultPolicy> & e_1, Data<float,DefaultPolicy> & f_1, Data<float,DefaultPolicy> & g) { if(1LL) { unsigned int gridDimX__ = 16LL; HMPP_CHECK_GRID_BOUNDARY(gridDimX__); unsigned int gridDimY__ = 64LL; HMPP_CHECK_GRID_BOUNDARY(gridDimY__); dim3 dim_grid(gridDimX__, gridDimY__); const unsigned int blockDimX__ = 32LL; const unsigned int blockDimY__ = 8LL; HMPP_CHECK_BLOCK_BOUNDARY(blockDimX__*blockDimY__); #if TORCH_HIP_VERSION >= 3020 e_1.makeStreamWait(kernel_stream); f_1.makeStreamWait(kernel_stream); g.makeStreamWait(kernel_stream); #else if ((hmppcg_status_ = CHECK_STATUS(hipDeviceSynchronize()))) return; #endif dim3 dim_block(blockDimX__, blockDimY__); hipLaunchKernelGGL(( hmpp_codelet__threeMMloopc_loop0_<blockDimX__, blockDimY__>), dim3(dim_grid), dim3(dim_block), 0LL, kernel_stream, e_1.getDeviceAddr(), f_1.getDeviceAddr(), g.getDeviceAddr()); if ((hmppcg_status_ = CHECK_STATUS(hipGetLastError()))) return; #if TORCH_HIP_VERSION >= 3020 if((hmppcg_status_ = CHECK_STATUS(hipEventRecord(kernel_event, kernel_stream)))) return; e_1.waitOnEvent(kernel_event); f_1.waitOnEvent(kernel_event); g.waitOnEvent(kernel_event); #else if ((hmppcg_status_ = CHECK_STATUS(hipDeviceSynchronize()))) return; #endif }; } // HMPP_API #ifdef __cplusplus #define HMPP_EXTERN extern "C" #else #define HMPP_EXTERN #endif #ifdef _WIN32 #define HMPP_EXPORT __declspec(dllexport) #define HMPP_INLINE __inline #else #define HMPP_EXPORT #define HMPP_INLINE inline #endif #define HMPP_API HMPP_EXTERN HMPP_EXPORT // HMPPCG_POP_HASH #define HMPPCG_POP_HASH(major,minor) (((major)<<16)|(minor)) // --------------------------------------------------------------------------- // HMPP handle // --------------------------------------------------------------------------- typedef struct hmpp_handle_struct { Data<float,DefaultPolicy> * __arg0; Data<float,DefaultPolicy> * __arg1; Data<float,DefaultPolicy> * __arg2; Data<float,DefaultPolicy> * __arg3; Data<float,DefaultPolicy> * __arg4; Data<float,DefaultPolicy> * __arg5; Data<float,DefaultPolicy> * __arg6; hipDeviceProp_t devProp; hipStream_t kernel_stream; 
hipEvent_t kernel_event; std::map<std::string,UserData*> map_user_data; } hmpp_handle_t; // --------------------------------------------------------------------------- // hmpp_createInstance() // --------------------------------------------------------------------------- HMPP_API hmpp_handle_t * hmpp_createInstance() { hmpp_handle_t * __h = new hmpp_handle_t; if(!__h) return 0; if(CHECK_STATUS(hipStreamCreate(&__h->kernel_stream)) != 0) return NULL; #if TORCH_HIP_VERSION >= 3020 if(CHECK_STATUS(hipEventCreateWithFlags(&__h->kernel_event, hipEventDisableTiming | hipEventBlockingSync)) != 0) return NULL; #else if(CHECK_STATUS(hipEventCreateWithFlags(&__h->kernel_event, hipEventBlockingSync)) != 0) return NULL; #endif __h->__arg0 = NULL; __h->__arg1 = NULL; __h->__arg2 = NULL; __h->__arg3 = NULL; __h->__arg4 = NULL; __h->__arg5 = NULL; __h->__arg6 = NULL; int device; hipGetDevice(&device); hipGetDeviceProperties(&(__h->devProp), device); return __h; } // --------------------------------------------------------------------------- // hmpp_freeInstance() // --------------------------------------------------------------------------- HMPP_API int hmpp_freeInstance(hmpp_handle_t * __h) { delete __h->__arg0; delete __h->__arg1; delete __h->__arg2; delete __h->__arg3; delete __h->__arg4; delete __h->__arg5; delete __h->__arg6; hipStreamDestroy(__h->kernel_stream); hipEventDestroy(__h->kernel_event); __h->kernel_stream = 0; for(std::map<std::string,UserData*>::const_iterator it = __h->map_user_data.begin(); it != __h->map_user_data.end(); it++) { delete it->second; } delete(__h); return 0; } // --------------------------------------------------------------------------- // hmpp_allocateOnHWA() // --------------------------------------------------------------------------- HMPP_API int hmpp_allocateOnHWA(hmpp_handle_t * __h, int major, int minor, const size_t * size, size_t elsize, int dim) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,2): // e@hmpp_codelet__threeMMloopa case HMPPCG_POP_HASH(3,0): // e@hmpp_codelet__threeMMloopc { __h->__arg0 = new Data<float,DefaultPolicy>("__arg0", DEFAULT); return __h->__arg0->allocate2(dim, size); } case HMPPCG_POP_HASH(2,2): // f@hmpp_codelet__threeMMloopb case HMPPCG_POP_HASH(3,1): // f@hmpp_codelet__threeMMloopc { __h->__arg1 = new Data<float,DefaultPolicy>("__arg1", DEFAULT); return __h->__arg1->allocate2(dim, size); } case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__threeMMloopa { __h->__arg2 = new Data<float,DefaultPolicy>("__arg2", DEFAULT); return __h->__arg2->allocate2(dim, size); } case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__threeMMloopa { __h->__arg3 = new Data<float,DefaultPolicy>("__arg3", DEFAULT); return __h->__arg3->allocate2(dim, size); } case HMPPCG_POP_HASH(2,0): // c@hmpp_codelet__threeMMloopb { __h->__arg4 = new Data<float,DefaultPolicy>("__arg4", DEFAULT); return __h->__arg4->allocate2(dim, size); } case HMPPCG_POP_HASH(2,1): // d@hmpp_codelet__threeMMloopb { __h->__arg5 = new Data<float,DefaultPolicy>("__arg5", DEFAULT); return __h->__arg5->allocate2(dim, size); } case HMPPCG_POP_HASH(3,2): // g@hmpp_codelet__threeMMloopc { __h->__arg6 = new Data<float,DefaultPolicy>("__arg6", DEFAULT); return __h->__arg6->allocate2(dim, size); } default: return -1; } } HMPP_API int hmpp_allocateOutputOnHWA(hmpp_handle_t * __h, int major, int minor, const size_t * size, size_t elsize, int dim) { return hmpp_allocateOnHWA(__h, major, minor, size, elsize, dim); } HMPP_API int hmpp_allocateInputOnHWA(hmpp_handle_t * __h, int major, int minor, 
const size_t * size, size_t elsize, int dim) { return hmpp_allocateOnHWA(__h, major, minor, size, elsize, dim); } HMPP_API int hmpp_allocateInOutOnHWA(hmpp_handle_t * __h, int major, int minor, const size_t * size, size_t elsize, int dim) { return hmpp_allocateOnHWA(__h, major, minor, size, elsize, dim); } // --------------------------------------------------------------------------- // hmpp_readDataFromHWA() // --------------------------------------------------------------------------- HMPP_API int hmpp_readDataFromHWA(hmpp_handle_t * __h, int major, int minor, void * data, const size_t * size, size_t elsize, int dim, int async) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,2): // e@hmpp_codelet__threeMMloopa case HMPPCG_POP_HASH(3,0): // e@hmpp_codelet__threeMMloopc { return __h->__arg0->download(data,async!=0); } case HMPPCG_POP_HASH(2,2): // f@hmpp_codelet__threeMMloopb case HMPPCG_POP_HASH(3,1): // f@hmpp_codelet__threeMMloopc { return __h->__arg1->download(data,async!=0); } case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__threeMMloopa { return __h->__arg2->download(data,async!=0); } case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__threeMMloopa { return __h->__arg3->download(data,async!=0); } case HMPPCG_POP_HASH(2,0): // c@hmpp_codelet__threeMMloopb { return __h->__arg4->download(data,async!=0); } case HMPPCG_POP_HASH(2,1): // d@hmpp_codelet__threeMMloopb { return __h->__arg5->download(data,async!=0); } case HMPPCG_POP_HASH(3,2): // g@hmpp_codelet__threeMMloopc { return __h->__arg6->download(data,async!=0); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_writeDataToHWA() // --------------------------------------------------------------------------- HMPP_API int hmpp_writeDataToHWA(hmpp_handle_t * __h, int major, int minor, const void * data, const size_t * size, size_t elsize, int dim, int async) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,2): // e@hmpp_codelet__threeMMloopa case HMPPCG_POP_HASH(3,0): // e@hmpp_codelet__threeMMloopc { return __h->__arg0->upload(data,async!=0); } case HMPPCG_POP_HASH(2,2): // f@hmpp_codelet__threeMMloopb case HMPPCG_POP_HASH(3,1): // f@hmpp_codelet__threeMMloopc { return __h->__arg1->upload(data,async!=0); } case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__threeMMloopa { return __h->__arg2->upload(data,async!=0); } case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__threeMMloopa { return __h->__arg3->upload(data,async!=0); } case HMPPCG_POP_HASH(2,0): // c@hmpp_codelet__threeMMloopb { return __h->__arg4->upload(data,async!=0); } case HMPPCG_POP_HASH(2,1): // d@hmpp_codelet__threeMMloopb { return __h->__arg5->upload(data,async!=0); } case HMPPCG_POP_HASH(3,2): // g@hmpp_codelet__threeMMloopc { return __h->__arg6->upload(data,async!=0); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_readDataSectionFromHWA() // --------------------------------------------------------------------------- HMPP_API int hmpp_readDataSectionFromHWA(hmpp_handle_t * __h, int major, int minor, void * data, const __hmppcg_DataSection *section, const size_t * size, size_t elsize, int dim, int async) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,2): // e@hmpp_codelet__threeMMloopa case HMPPCG_POP_HASH(3,0): // e@hmpp_codelet__threeMMloopc { return __h->__arg0->downloadSection(data,section,async!=0); } case HMPPCG_POP_HASH(2,2): // f@hmpp_codelet__threeMMloopb case HMPPCG_POP_HASH(3,1): // 
f@hmpp_codelet__threeMMloopc { return __h->__arg1->downloadSection(data,section,async!=0); } case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__threeMMloopa { return __h->__arg2->downloadSection(data,section,async!=0); } case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__threeMMloopa { return __h->__arg3->downloadSection(data,section,async!=0); } case HMPPCG_POP_HASH(2,0): // c@hmpp_codelet__threeMMloopb { return __h->__arg4->downloadSection(data,section,async!=0); } case HMPPCG_POP_HASH(2,1): // d@hmpp_codelet__threeMMloopb { return __h->__arg5->downloadSection(data,section,async!=0); } case HMPPCG_POP_HASH(3,2): // g@hmpp_codelet__threeMMloopc { return __h->__arg6->downloadSection(data,section,async!=0); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_writeDataSectionToHWA() // --------------------------------------------------------------------------- HMPP_API int hmpp_writeDataSectionToHWA(hmpp_handle_t * __h, int major, int minor, const void * data, const __hmppcg_DataSection *section, const size_t * size, size_t elsize, int dim, int async) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,2): // e@hmpp_codelet__threeMMloopa case HMPPCG_POP_HASH(3,0): // e@hmpp_codelet__threeMMloopc { return __h->__arg0->uploadSection(data,section,async!=0); } case HMPPCG_POP_HASH(2,2): // f@hmpp_codelet__threeMMloopb case HMPPCG_POP_HASH(3,1): // f@hmpp_codelet__threeMMloopc { return __h->__arg1->uploadSection(data,section,async!=0); } case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__threeMMloopa { return __h->__arg2->uploadSection(data,section,async!=0); } case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__threeMMloopa { return __h->__arg3->uploadSection(data,section,async!=0); } case HMPPCG_POP_HASH(2,0): // c@hmpp_codelet__threeMMloopb { return __h->__arg4->uploadSection(data,section,async!=0); } case HMPPCG_POP_HASH(2,1): // d@hmpp_codelet__threeMMloopb { return __h->__arg5->uploadSection(data,section,async!=0); } case HMPPCG_POP_HASH(3,2): // g@hmpp_codelet__threeMMloopc { return __h->__arg6->uploadSection(data,section,async!=0); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_waitForWriteTransfer() // --------------------------------------------------------------------------- HMPP_API int hmpp_waitForWriteTransfer(hmpp_handle_t * __h, int major, int minor) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,2): // e@hmpp_codelet__threeMMloopa case HMPPCG_POP_HASH(3,0): // e@hmpp_codelet__threeMMloopc { return __h->__arg0->waitTransfer(); } case HMPPCG_POP_HASH(2,2): // f@hmpp_codelet__threeMMloopb case HMPPCG_POP_HASH(3,1): // f@hmpp_codelet__threeMMloopc { return __h->__arg1->waitTransfer(); } case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__threeMMloopa { return __h->__arg2->waitTransfer(); } case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__threeMMloopa { return __h->__arg3->waitTransfer(); } case HMPPCG_POP_HASH(2,0): // c@hmpp_codelet__threeMMloopb { return __h->__arg4->waitTransfer(); } case HMPPCG_POP_HASH(2,1): // d@hmpp_codelet__threeMMloopb { return __h->__arg5->waitTransfer(); } case HMPPCG_POP_HASH(3,2): // g@hmpp_codelet__threeMMloopc { return __h->__arg6->waitTransfer(); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_waitForReadTransfer() // --------------------------------------------------------------------------- HMPP_API int hmpp_waitForReadTransfer(hmpp_handle_t * __h, int major, 
int minor) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,2): // e@hmpp_codelet__threeMMloopa case HMPPCG_POP_HASH(3,0): // e@hmpp_codelet__threeMMloopc { return __h->__arg0->waitTransfer(); } case HMPPCG_POP_HASH(2,2): // f@hmpp_codelet__threeMMloopb case HMPPCG_POP_HASH(3,1): // f@hmpp_codelet__threeMMloopc { return __h->__arg1->waitTransfer(); } case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__threeMMloopa { return __h->__arg2->waitTransfer(); } case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__threeMMloopa { return __h->__arg3->waitTransfer(); } case HMPPCG_POP_HASH(2,0): // c@hmpp_codelet__threeMMloopb { return __h->__arg4->waitTransfer(); } case HMPPCG_POP_HASH(2,1): // d@hmpp_codelet__threeMMloopb { return __h->__arg5->waitTransfer(); } case HMPPCG_POP_HASH(3,2): // g@hmpp_codelet__threeMMloopc { return __h->__arg6->waitTransfer(); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_codeletsAreReentrant() // --------------------------------------------------------------------------- HMPP_API int hmpp_codeletsAreReentrant() { return 0; } // --------------------------------------------------------------------------- // hmpp_start() // --------------------------------------------------------------------------- HMPP_API int hmpp_start(hmpp_handle_t * __h, int __id, int __async) { int status = 0; switch(__id) { case 1: // hmpp_codelet__threeMMloopa(__arg2,__arg3,__arg0) hmpp_codelet__threeMMloopa(status, __h, __h->devProp, __h->kernel_stream, __h->kernel_event, (*__h->__arg2), (*__h->__arg3), (*__h->__arg0)); return status; case 2: // hmpp_codelet__threeMMloopb(__arg4,__arg5,__arg1) hmpp_codelet__threeMMloopb(status, __h, __h->devProp, __h->kernel_stream, __h->kernel_event, (*__h->__arg4), (*__h->__arg5), (*__h->__arg1)); return status; case 3: // hmpp_codelet__threeMMloopc(__arg0,__arg1,__arg6) hmpp_codelet__threeMMloopc(status, __h, __h->devProp, __h->kernel_stream, __h->kernel_event, (*__h->__arg0), (*__h->__arg1), (*__h->__arg6)); return status; } return -1; } // --------------------------------------------------------------------------- // hmpp_wait() // --------------------------------------------------------------------------- HMPP_API int hmpp_wait(hmpp_handle_t * __h,int codelet_id) { return CHECK_STATUS(hipStreamSynchronize(__h->kernel_stream)); } // --------------------------------------------------------------------------- // hmpp_version() // --------------------------------------------------------------------------- HMPP_API int hmpp_version() { #ifndef HMPP_RUNTIME_TARGET_VERSION #define HMPP_RUNTIME_TARGET_VERSION(major,minor)((major << 16) | (minor << 8)) #endif return HMPP_RUNTIME_TARGET_VERSION(2,5); } //
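// ---------------------------------------------------------------------------
// Editor's sketch (not HMPP-generated): the entry points above are normally
// driven by the HMPP runtime, but the expected call sequence for computing
// g = (a*b) * (c*d) on 512x512 float matrices looks like this.  The
// (major,minor) ids match the HMPPCG_POP_HASH cases in the switches above;
// error checking is omitted for brevity.
static int example_drive_threemm(const float *a, const float *b,
                                 const float *c, const float *d, float *g)
{
    const size_t size[2] = { 512, 512 };
    hmpp_handle_t *h = hmpp_createInstance();
    if (!h) return -1;

    // One allocation per buffer; shared arguments such as e ((1,2)/(3,0))
    // map to the same __argN and are allocated once through either alias.
    hmpp_allocateOnHWA(h, 1, 0, size, sizeof(float), 2); // a
    hmpp_allocateOnHWA(h, 1, 1, size, sizeof(float), 2); // b
    hmpp_allocateOnHWA(h, 1, 2, size, sizeof(float), 2); // e (intermediate)
    hmpp_allocateOnHWA(h, 2, 0, size, sizeof(float), 2); // c
    hmpp_allocateOnHWA(h, 2, 1, size, sizeof(float), 2); // d
    hmpp_allocateOnHWA(h, 2, 2, size, sizeof(float), 2); // f (intermediate)
    hmpp_allocateOnHWA(h, 3, 2, size, sizeof(float), 2); // g

    // Upload inputs, run the codelets in dependency order, read back g.
    hmpp_writeDataToHWA(h, 1, 0, a, size, sizeof(float), 2, 0);
    hmpp_writeDataToHWA(h, 1, 1, b, size, sizeof(float), 2, 0);
    hmpp_writeDataToHWA(h, 2, 0, c, size, sizeof(float), 2, 0);
    hmpp_writeDataToHWA(h, 2, 1, d, size, sizeof(float), 2, 0);
    hmpp_start(h, 1, 0);                                 // e = a * b
    hmpp_start(h, 2, 0);                                 // f = c * d
    hmpp_start(h, 3, 0);                                 // g = e * f
    hmpp_wait(h, 3);
    hmpp_readDataFromHWA(h, 3, 2, g, size, sizeof(float), 2, 0);

    return hmpp_freeInstance(h);
}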
9bca041eb486d82ed1c8d21fe32efa3be06d57ff.cu
// ** Original codelet code ** // // #pragma hmppcg cpiparam __arg0 INOUT e%hmpp_codelet__threeMMloopa: (1, 2) e%hmpp_codelet__threeMMloopc: (3, 0) // #pragma hmppcg cpiparam __arg1 INOUT f%hmpp_codelet__threeMMloopb: (2, 2) f%hmpp_codelet__threeMMloopc: (3, 1) // #pragma hmppcg cpiparam __arg2 INOUT a%hmpp_codelet__threeMMloopa: (1, 0) // #pragma hmppcg cpiparam __arg3 INOUT b%hmpp_codelet__threeMMloopa: (1, 1) // #pragma hmppcg cpiparam __arg4 INOUT c%hmpp_codelet__threeMMloopb: (2, 0) // #pragma hmppcg cpiparam __arg5 INOUT d%hmpp_codelet__threeMMloopb: (2, 1) // #pragma hmppcg cpiparam __arg6 INOUT g%hmpp_codelet__threeMMloopc: (3, 2) // // #pragma hmppcg cpicall hmpp_codelet__threeMMloopa(__arg2, __arg3, __arg0): 1 // #pragma hmppcg cpicall hmpp_codelet__threeMMloopb(__arg4, __arg5, __arg1): 2 // #pragma hmppcg cpicall hmpp_codelet__threeMMloopc(__arg0, __arg1, __arg6): 3 // // // /* begin of extracted source code for directive set "group1" */ // // // # 32 "threemm.c" // typedef float DATA_TYPE; // // // # 42 "threemm.c" // void hmpp_codelet__threeMMloopa(DATA_TYPE a[512][512], DATA_TYPE b[512][512], DATA_TYPE e[512][512]) // { // int i, j, k; // // // #pragma hmppcg grid blocksize 32 X 8 // # 10 "<preprocessor>" // # 49 "threemm.c" // #pragma hmppcg parallel // # 13 "<preprocessor>" // # 50 "threemm.c" // for (i = 0 ; i < 512 ; i++) // { // #pragma hmppcg parallel // # 18 "<preprocessor>" // # 53 "threemm.c" // for (j = 0 ; j < 512 ; j++) // { // e[i][j] = 0; // // #pragma hmppcg noParallel // # 25 "<preprocessor>" // # 58 "threemm.c" // for (k = 0 ; k < 512 ; ++k) // { // e[i][j] += a[i][k] * b[k][j]; // } // } // } // } // // // # 67 "threemm.c" // void hmpp_codelet__threeMMloopb(DATA_TYPE c[512][512], DATA_TYPE d[512][512], DATA_TYPE f[512][512]) // { // int i, j, k; // // // #pragma hmppcg grid blocksize 32 X 8 // # 10 "<preprocessor>" // # 74 "threemm.c" // #pragma hmppcg parallel // # 13 "<preprocessor>" // # 75 "threemm.c" // for (i = 0 ; i < 512 ; i++) // { // #pragma hmppcg parallel // # 18 "<preprocessor>" // # 78 "threemm.c" // for (j = 0 ; j < 512 ; j++) // { // f[i][j] = 0; // // #pragma hmppcg noParallel // # 25 "<preprocessor>" // # 83 "threemm.c" // for (k = 0 ; k < 512 ; ++k) // { // f[i][j] += c[i][k] * d[k][j]; // } // } // } // } // // // # 92 "threemm.c" // void hmpp_codelet__threeMMloopc(DATA_TYPE e[512][512], DATA_TYPE f[512][512], DATA_TYPE g[512][512]) // { // int i, j, k; // // // #pragma hmppcg grid blocksize 32 X 8 // # 10 "<preprocessor>" // # 99 "threemm.c" // #pragma hmppcg parallel // # 13 "<preprocessor>" // # 100 "threemm.c" // for (i = 0 ; i < 512 ; i++) // { // // #pragma hmppcg parallel // # 19 "<preprocessor>" // # 104 "threemm.c" // for (j = 0 ; j < 512 ; j++) // { // g[i][j] = 0; // // #pragma hmppcg noParallel // # 26 "<preprocessor>" // # 109 "threemm.c" // for (k = 0 ; k < 512 ; ++k) // { // g[i][j] += e[i][k] * f[k][j]; // } // } // } // } // // // /* end of extracted source code for directive set "group1" */ // // // // ** End of original codelet codelet ** #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #ifdef _MSC_VER # define HMPPCG_RESTRICT typedef __int8 int8_t; typedef unsigned __int8 uint8_t; typedef __int16 int16_t; typedef unsigned __int16 uint16_t; typedef __int32 int32_t; typedef unsigned __int32 uint32_t; typedef __int64 int64_t; typedef unsigned __int64 uint64_t; # ifdef _WIN64 typedef int64_t intptr_t; # else typedef int32_t intptr_t; # endif #else # if defined(__GNUC__) || defined(__RESTRICT) # 
define HMPPCG_RESTRICT __restrict # else # define HMPPCG_RESTRICT # endif # include <stdint.h> #endif // Dynamic array typedef struct __hmppcg_array_struct { void *array; size_t *size; size_t elsize; } __hmppcg_array_t; // Data section typedef struct __hmppcg_DataSection { size_t from; size_t to; size_t step; } __hmppcg_DataSection; #include <cuda.h> #if CUDART_VERSION < 2000 #error Bad CUDA Runtime version. CUDA Toolkit 2.0+ required. #endif #define HMPP_CONSTMEM_OFFSET 0 #include <map> #include <string> // ---------------------------------------------------------------------------- // HMPP CUDA support classes // ---------------------------------------------------------------------------- #ifndef __HMPP_CUDADATA_H__ #define __HMPP_CUDADATA_H__ #ifndef HMPPCG_WARP_SIZE #define HMPPCG_WARP_SIZE 32 #endif enum CopyKind { HostToHost = 0, HostToDevice = 1, DeviceToHost = 2, DeviceToDevice = 3, }; inline int hmppcg_check_status(const char *file,int line,cudaError_t status) { if(status != cudaSuccess) { fprintf(stderr, "%s:%d CUDA Error: %s\n", file, line, cudaGetErrorString(status)); return -1; } return 0; } #define CHECK_STATUS(X) hmppcg_check_status(__FILE__,__LINE__,(X)) #define HMPP_CHECK_GRID_BOUNDARY(x) \ if(x>65535){\ fprintf(stderr, "%s:%d Grid Dimension Error: '%s' exceeds the 65535 dimension limit. Please modify the grid size configuration (see the hmppcg grid blocksize pragma) or switch to 2D gridification\n", __FILE__,__LINE__, #x);\ exit(-1) ;\ } #define HMPP_CHECK_BLOCK_BOUNDARY(x) \ if(x > devProp.maxThreadsPerBlock){ \ fprintf(stderr, "%s:%d Number of threads per block exceeds for the HWA: it is '%d' and HWA supports up to '%d'. Please modify the block size configuration (see the hmppcg grid blocksize pragma)\n", __FILE__,__LINE__, x, devProp.maxThreadsPerBlock); \ exit(-1) ; \ } // ---------------------------------------------------------------------------- // class DefaultPolicy // ---------------------------------------------------------------------------- struct DefaultPolicy { public: DefaultPolicy() { } virtual ~DefaultPolicy() { } int deviceAlloc(void **ptr,size_t size) { if( CHECK_STATUS(cudaStreamCreate(&stream_)) != 0 ) return -1; if( CHECK_STATUS(cudaMalloc(ptr,size)) != 0 ) return -1; #if CUDA_VERSION >= 3020 if( CHECK_STATUS(cudaEventCreateWithFlags(&event, cudaEventDisableTiming | cudaEventBlockingSync)) != 0) return -1; #else if( CHECK_STATUS(cudaEventCreateWithFlags(&event, cudaEventBlockingSync)) != 0) return -1; #endif return 0; } int deviceFree(void *ptr) { if( CHECK_STATUS(cudaStreamDestroy(stream_)) != 0) return -1; if( CHECK_STATUS(cudaFree(ptr)) != 0) return -1; if( CHECK_STATUS(cudaEventDestroy(event)) != 0) return -1; return 0; } int deviceMemcpy(void *dst,const void *src,size_t size,CopyKind kind,bool async) { static cudaMemcpyKind cudaKind[] = {cudaMemcpyHostToHost, cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost, cudaMemcpyDeviceToDevice }; if(async) { return CHECK_STATUS(cudaMemcpyAsync(dst,src,size,cudaKind[kind],stream_)); } else { return CHECK_STATUS(cudaMemcpy(dst,src,size,cudaKind[kind])); } } int makeStreamWait(cudaStream_t wstream) { int status; status = CHECK_STATUS(cudaEventRecord(event, stream_)); if (status != 0) return status; #if CUDA_VERSION >= 3020 return CHECK_STATUS(cudaStreamWaitEvent(wstream, event, 0)); #else return CHECK_STATUS(cudaEventSynchronize(event)); #endif } int waitOnEvent(cudaEvent_t wevent) { #if CUDA_VERSION >= 3020 return CHECK_STATUS(cudaStreamWaitEvent(stream_, wevent, 0)); #else return 
CHECK_STATUS(cudaEventSynchronize(wevent)); #endif } int deviceWait() { return CHECK_STATUS(cudaStreamSynchronize(stream_)); } private: cudaStream_t stream_; cudaEvent_t event; }; // ---------------------------------------------------------------------------- // class ConstantPolicy // ---------------------------------------------------------------------------- #ifndef HMPP_CONSTMEM_SIZE #define HMPP_CONSTMEM_SIZE 2048 #endif __constant__ int64_t hmpp_constmem[HMPP_CONSTMEM_SIZE / 8]; /// Shared memory array is aligned on 64 bit thanks to that (to avoid an nvcc compilation error) extern __shared__ int64_t hmpp_sharedmem[]; struct ConstantPolicy { public: ConstantPolicy() { static bool initialized = false; if(!initialized) { next_offset_ = HMPP_CONSTMEM_OFFSET; initialized = true; } offset_ = -1; } virtual ~ConstantPolicy() { } void setStaticOffset(int offset) { offset_ = offset; while(offset_ % 8) offset_ ++; } int deviceAlloc(void **ptr, size_t size) { #if CUDA_VERSION >= 3020 if( CHECK_STATUS(cudaEventCreateWithFlags(&event, cudaEventDisableTiming | cudaEventBlockingSync)) != 0) return -1; #else if( CHECK_STATUS(cudaEventCreateWithFlags(&event, cudaEventBlockingSync)) != 0) return -1; #endif if(offset_ != -1) { if((offset_ + size) >= HMPP_CONSTMEM_SIZE) return -1; (*ptr) = (void *)offset_; return 0; } if((next_offset_ + size) >= HMPP_CONSTMEM_SIZE) return -1; (*ptr) = (void *)next_offset_; next_offset_ += size; return 0; } int deviceFree(void *ptr) { return 0; } int deviceMemcpy(void *dst,const void *src,size_t size,CopyKind kind,bool async) { size_t offset; switch(kind) { case HostToDevice: offset = (size_t)dst; return CHECK_STATUS(cudaMemcpyToSymbol(hmpp_constmem,src,size,offset,cudaMemcpyHostToDevice)); case DeviceToHost: offset = (size_t)src; return CHECK_STATUS(cudaMemcpyFromSymbol(dst,hmpp_constmem,size,offset,cudaMemcpyDeviceToHost)); default: return -1; } } int makeStreamWait(cudaStream_t wstream) { int status; /* stream 0 at the moment */ status = CHECK_STATUS(cudaEventRecord(event, 0)); if (status != 0) return status; #if CUDA_VERSION >= 3020 return CHECK_STATUS(cudaStreamWaitEvent(wstream, event, 0)); #else return CHECK_STATUS(cudaEventSynchronize(event)); #endif } int waitOnEvent(cudaEvent_t wevent) { /* stream 0 at the moment */ #if CUDA_VERSION >= 3020 return CHECK_STATUS(cudaStreamWaitEvent(0, wevent, 0)); #else return CHECK_STATUS(cudaEventSynchronize(wevent)); #endif } int deviceWait() { return 0; } private: static size_t next_offset_; int offset_; cudaEvent_t event; }; size_t ConstantPolicy::next_offset_; // ---------------------------------------------------------------------------- // class Lazy // ---------------------------------------------------------------------------- template <typename Policy> struct Lazy { char * value; bool valid; bool allocated; void ** devaddr; Policy * policy; size_t size; Lazy(size_t elem_size) { value = new char[elem_size]; } ~Lazy() { delete[] value; } int requireDeviceAlloc() { if(!allocated) { allocated = true; return policy->deviceAlloc(devaddr,size); } else { return 0; } } }; // ---------------------------------------------------------------------------- // class Element // ---------------------------------------------------------------------------- template <typename T,typename Policy> struct Element { Element(void * const * device_addr, size_t offset, Policy *policy, Lazy<Policy> * lazy) : device_addr_(device_addr) , offset_(offset), policy_(policy), lazy_(lazy) { } Element &operator=(const T & value) { if(lazy_) { *((T 
*)(lazy_->value)) = value; lazy_->valid = true; return *this; } if(lazy_) lazy_->requireDeviceAlloc(); policy_->deviceMemcpy(((char*)(*device_addr_)) + offset_,(const char*)&value,ElemSize,HostToDevice,false); return *this; } Element &operator=(const Element & src) { if(src.lazy_ && src.lazy_->valid) { lazy_->valid = true; *((T *)(lazy_->value)) = *((T *)(src.lazy_->value)); return *this; } if(lazy_) lazy_->requireDeviceAlloc(); if(src.lazy_) src.lazy_->requireDeviceAlloc(); policy_->deviceMemcpy(((char*)(*device_addr_)) + offset_,((const char*)(*src.device_addr_)) + src.offset_, ElemSize,DeviceToDevice,false); if(lazy_) { lazy_->valid = false; } return *this; } operator T() { if(lazy_ && lazy_->valid) return *((T *)(lazy_->value)); T res; if(lazy_) lazy_->requireDeviceAlloc(); policy_->deviceMemcpy(&res,((const char*)(*device_addr_)) + offset_,ElemSize,DeviceToHost,false); if(lazy_) { *((T *)(lazy_->value)) = res; lazy_->valid = true; } return res; } typedef T Type; enum { ElemSize = sizeof(T) }; private: size_t offset_; void *const* device_addr_; Policy *policy_; public: Lazy<Policy> * lazy_; }; enum DataFlags { DEFAULT = 0x0, LAZY = 0x1 }; // ---------------------------------------------------------------------------- // class Data // ---------------------------------------------------------------------------- template <typename T,typename Policy> class Data { public: typedef T Type; typedef Element<T,Policy> ElementType; enum { ElemSize = sizeof(T) }; Data(const char * name, unsigned int flags = DEFAULT) : name_(name), flags_(flags), dim_(0), sizes_(0), size_(0), host_addr_(0), device_addr_(0) { policy_ = new Policy; if(flags_ & LAZY) { lazy_ = new Lazy<Policy>(ElemSize); lazy_->valid = false; lazy_->devaddr = 0; lazy_->policy = policy_; } else lazy_ = 0; } ~Data() { free(); delete policy_; if(lazy_) delete lazy_; } int allocate(unsigned int dim, size_t idx0 = 0, size_t idx1 = 0, size_t idx2 = 0, size_t idx3 = 0, size_t idx4 = 0, size_t idx5 = 0, size_t idx6 = 0, size_t idx7 = 0, size_t idx8 = 0, size_t idx9 = 0, size_t idxA = 0, size_t idxB = 0) { const size_t sizes[] = { idx0, idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9, idxA, idxB }; return allocate2(dim,sizes); } int allocate3(unsigned int dim_p, const size_t * sizes_p) { size_t sizes[2]; sizes[0] = 1; sizes[1] = 0; for(int d = 0 ; d < dim_p ; d++) { sizes[0] *= sizes_p[d]; } return allocate2(1, sizes); } int allocate2(unsigned int dim, const size_t * sizes) { dim_ = dim; sizes_ = new size_t[dim]; dimSizes_ = new size_t[dim]; size_ = ElemSize; for(int d=0;d<dim;d++) { sizes_[d] = sizes[d]; size_ *= sizes_[d]; size_t size = 1; for(int d2=d+1;d2<dim;d2++) size*=sizes[d2]; dimSizes_[d] = size; } if(lazy_) { lazy_->allocated = false; lazy_->devaddr = &device_addr_; lazy_->size = size_; return 0; } else return policy_->deviceAlloc(&device_addr_,size_); } int free() { if(sizes_) { delete [] sizes_; delete [] dimSizes_; sizes_ = 0; dim_ = 0; size_ = 0; } if(device_addr_) { if(policy_->deviceFree(device_addr_) != 0) return -1; device_addr_ = 0; } return 0; } int download(void * host_addr,bool async) { if(lazy_ && lazy_->valid) { *((T *)host_addr) = *((T *)(lazy_->value)); return 0; } if(lazy_) { lazy_->requireDeviceAlloc(); } int sts = policy_->deviceMemcpy(host_addr,device_addr_,size_,DeviceToHost,async); if(lazy_) { lazy_->valid = true; *((T *)(lazy_->value)) = *((T *)host_addr); } return sts; } int upload(const void * host_addr,bool async) { if(lazy_) { lazy_->valid = true; *((T *)(lazy_->value)) = * ((T *)host_addr); 
lazy_->requireDeviceAlloc(); } return policy_->deviceMemcpy(device_addr_,host_addr,size_,HostToDevice,async); } int downloadSection(void *host_addr,const __hmppcg_DataSection *sections,bool async) { return sectionCopy(host_addr,device_addr_,sections,DeviceToHost,async); } int uploadSection(const void *host_addr,const __hmppcg_DataSection *sections,bool async) { return sectionCopy(device_addr_,host_addr,sections,HostToDevice,async); } int makeStreamWait(cudaStream_t wstream) { if(lazy_) lazy_->requireDeviceAlloc(); return policy_->makeStreamWait(wstream); } int waitOnEvent(cudaEvent_t wevent) { return policy_->waitOnEvent(wevent); } int waitTransfer() { return policy_->deviceWait(); } ElementType operator()(size_t idx0 = 0, size_t idx1 = 0, size_t idx2 = 0, size_t idx3 = 0, size_t idx4 = 0, size_t idx5 = 0, size_t idx6 = 0, size_t idx7 = 0, size_t idx8 = 0, size_t idx9 = 0, size_t idxA = 0, size_t idxB = 0) { size_t sizes[] = { idx0, idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9, idxA, idxB }; return at(sizes); } ElementType at(size_t *idx) { size_t offset = idx[0]; return ElementType(&device_addr_,offset*ElemSize,policy_,lazy_); } template <typename Y> Element<Y,Policy> at(size_t offset) { return Element<Y,Policy>(&device_addr_,offset,policy_,lazy_); } ElementType operator=(const T & value) { ElementType res(&device_addr_,0,policy_,lazy_); res = value; return res; } ElementType operator=(const Data &data) { return operator=(data.value()); } T value() const { ElementType res(&device_addr_,0,policy_,lazy_); return (T)res; } operator T() { return value(); } T *getDeviceAddr() { if(lazy_) lazy_->requireDeviceAlloc(); if(lazy_ && lazy_->valid) { policy_->deviceMemcpy(device_addr_,lazy_->value,size_,HostToDevice,false); } return (T*)device_addr_; } void invalidateLazy() { if(lazy_) { lazy_->valid = false; } } private: Data(const Data &data) {} int sectionCopy(char *dst,const char *src,int offset,int cur, const __hmppcg_DataSection *sections,int lastdense,CopyKind kind,bool async) { int d; int size = 1; for(d=cur+1;d<dim_;d++) size *= sizes_[d]; if(cur<(lastdense-1)) { int x; for(x=sections[cur].from;x<=sections[cur].to;x+=sections[cur].step) if(sectionCopy(dst,src,offset+x*size,cur+1,sections,lastdense,kind,async) != 0) return -1; } else { int step = sections[cur].step; if(step == 1) { int start = (offset + sections[cur].from * size) * ElemSize; int total = (sections[cur].to - sections[cur].from + 1) * size * ElemSize; return policy_->deviceMemcpy(dst+start,src+start,total,kind,async); } else { int x; for(x=sections[cur].from;x<=sections[cur].to;x+=step) { int off = (offset + x * size) * ElemSize; if(policy_->deviceMemcpy(dst+off,src+off,size * ElemSize,kind,async) != 0) return -1; } } } return 0; } int sectionCopy(void *dst,const void *src, const __hmppcg_DataSection *sections,CopyKind kind,bool async) { int i; int lastdense = dim_; for (i = dim_ - 1 ; i >= 0 ; i --) { if ((sections[i].from == 0) && (sections[i].to == sizes_[i] - 1) && (sections[i].step == 1)) lastdense = i; else break; } return sectionCopy((char*)dst,(const char*)src,0,0,sections,lastdense,kind,async); } const char * name_; size_t flags_; void *device_addr_; void *host_addr_; size_t dim_; size_t *sizes_; size_t *dimSizes_; size_t size_; Lazy<Policy> * lazy_; public: Policy *policy_; }; // --------------------------------------------------------------------------- // User data // --------------------------------------------------------------------------- class UserData{ public: virtual ~UserData(){} UserData(){} }; 
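// ---------------------------------------------------------------------------
// Editor's sketch (not generated code): typical life cycle of the Data
// wrapper defined above, mirroring what hmpp_allocateOnHWA() and
// hmpp_writeDataToHWA() do further down in this file.  allocate2() takes
// per-dimension element counts; upload()/download() copy whole buffers and
// block when 'async' is false.
static int example_data_roundtrip(const float *host_in, float *host_out)
{
    Data<float, DefaultPolicy> buf("example", DEFAULT);
    const size_t sizes[2] = { 512, 512 };

    if (buf.allocate2(2, sizes) != 0)       // cudaMalloc under the hood
        return -1;
    if (buf.upload(host_in, false) != 0)    // host -> device, blocking
        return -1;
    // buf.getDeviceAddr() is what would be handed to a kernel here.
    return buf.download(host_out, false);   // device -> host, blocking
    // ~Data() releases the device buffer when 'buf' goes out of scope.
}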
#define __HMPPCG_COMPLEX_FLOAT_DEFINED typedef float2 __hmppcg_complex_float; #define __HMPPCG_COMPLEX_DOUBLE_DEFINED typedef double2 __hmppcg_complex_double; // --------------------------------------------------------------------------- // Allocatable Arrays // --------------------------------------------------------------------------- template <const size_t nb_dims> struct AArrayDesc { int lbounds_[nb_dims]; size_t sizes_[nb_dims]; size_t wholesize_; }; #ifndef __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATE #define __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATE( var, type, nb_dims, ... ) \ { int alloc_ranges[] = { __VA_ARGS__ }; \ int hmppcg_alloc_i; \ var ## _aarray_desc.wholesize_ = 1; \ for(hmppcg_alloc_i=0; hmppcg_alloc_i<nb_dims; hmppcg_alloc_i++){ \ int hmppcg_alloc_first = alloc_ranges[2*hmppcg_alloc_i]; \ int hmppcg_alloc_last = alloc_ranges[2*hmppcg_alloc_i + 1]; \ int hmppcg_alloc_size = hmppcg_alloc_last - hmppcg_alloc_first + 1; \ var ## _aarray_desc.lbounds_[hmppcg_alloc_i] = hmppcg_alloc_first; \ var ## _aarray_desc.sizes_[hmppcg_alloc_i] = hmppcg_alloc_size; \ var ## _aarray_desc.wholesize_ *= hmppcg_alloc_size; \ } \ if((hmppcg_status_ = var.allocate2(nb_dims, var ## _aarray_desc.sizes_))) \ return; \ } #endif #ifndef __HMPPCG_ALLOCATABLE_ARRAY_DEALLOCATE #define __HMPPCG_ALLOCATABLE_ARRAY_DEALLOCATE( var ) \ { \ var.free(); \ } #endif #ifndef __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATED #define __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATED( var ) \ (var.getDeviceAddr() != NULL) #endif //__HMPPCG_ALLOCATABLE_ARRAY_ALLOCATED #ifndef __HMPPCG_ALLOCATABLE_ARRAY_WHOLESIZE #define __HMPPCG_ALLOCATABLE_ARRAY_WHOLESIZE( var ) \ var ## _aarray_desc.wholesize_ #endif //__HMPPCG_ALLOCATABLE_ARRAY_WHOLESIZE #ifndef __HMPPCG_ALLOCATABLE_ARRAY_SIZE #define __HMPPCG_ALLOCATABLE_ARRAY_SIZE( var, d ) \ var ## _aarray_desc.sizes_[d] #endif //__HMPPCG_ALLOCATABLE_ARRAY_SIZE #ifndef __HMPPCG_ALLOCATABLE_ARRAY_LBOUND #define __HMPPCG_ALLOCATABLE_ARRAY_LBOUND( var, d ) \ var ## _aarray_desc.lbounds_[d] #endif //__HMPPCG_ALLOCATABLE_ARRAY_LBOUND #ifndef __HMPPCG_ALLOCATABLE_ARRAY_UBOUND #define __HMPPCG_ALLOCATABLE_ARRAY_UBOUND( var, d ) \ (var ## _aarray_desc.sizes_[d] + var ## _aarray_desc.lbounds_[d] - 1) #endif //__HMPPCG_ALLOCATABLE_ARRAY_UBOUND #ifndef __HMPP_INT_POW_FUNC #define __HMPP_INT_POW_FUNC(func_ext_name, func_type) \ __device__ func_type hmpp_pow ##func_ext_name ( func_type base, func_type exp ) \ { \ if(exp < 0) \ return 0; \ func_type result = 1; \ while (exp) \ { \ if (exp & 1) \ result *= base; \ exp >>= 1; \ base *= base; \ } \ return result; \ } #endif __HMPP_INT_POW_FUNC( i64, int64_t ); __HMPP_INT_POW_FUNC( i32, int32_t ); __HMPP_INT_POW_FUNC( i16, int16_t ); __HMPP_INT_POW_FUNC( i8, int8_t ); #ifndef __HMPP_UINT_POW_FUNC #define __HMPP_UINT_POW_FUNC(func_ext_name, func_type) \ __device__ func_type hmpp_pow ##func_ext_name ( func_type base, func_type exp ) \ { \ func_type result = 1; \ while (exp) \ { \ if (exp & 1) \ result *= base; \ exp >>= 1; \ base *= base; \ } \ return result; \ } #endif __HMPP_UINT_POW_FUNC( ui64, uint64_t ); __HMPP_UINT_POW_FUNC( ui32, uint32_t ); __HMPP_UINT_POW_FUNC( ui16, uint16_t ); __HMPP_UINT_POW_FUNC( ui8, uint8_t ); #endif // __HMPP_CUDADATA_H__ #ifndef __HMPPCG_COMPLEX_DOUBLE_DEFINED #define __HMPPCG_COMPLEX_DOUBLE_DEFINED typedef struct { double x; double y; }__hmppcg_complex_double; #endif /* __HMPPCG_COMPLEX_DOUBLE_DEFINED */ #ifndef __HMPPCG_COMPLEX_FLOAT_DEFINED #define __HMPPCG_COMPLEX_FLOAT_DEFINED typedef struct { float x; float y; }__hmppcg_complex_float; #endif /* 
__HMPPCG_COMPLEX_FLOAT_DEFINED */ template <const unsigned int blockDimX__, const unsigned int blockDimY__> __global__ void hmpp_codelet__threeMMloopa_loop0_( float * HMPPCG_RESTRICT a, float * HMPPCG_RESTRICT b, float * HMPPCG_RESTRICT e) { int32_t j_3; int32_t i_3; j_3 = (blockDimX__ * blockIdx.x + threadIdx.x); i_3 = (blockDimY__ * blockIdx.y + threadIdx.y); bool __hmppcg_guard = (!((j_3 <= 511) & (i_3 <= 511))); if(__hmppcg_guard) { goto __hmppcg_label1; }; e[(i_3 * 512) + j_3] = 0; { int32_t __hmppcg_end, k_3; for (k_3 = 0, __hmppcg_end = 511; k_3 <= __hmppcg_end; k_3 += 1) { e[(i_3 * 512) + j_3] = (e[(i_3 * 512) + j_3]) + ((a[(i_3 * 512) + k_3]) * (b[(k_3 * 512) + j_3])); } } __hmppcg_label1:; } void hmpp_codelet__threeMMloopa( int &hmppcg_status_, void * __h, const cudaDeviceProp &devProp, cudaStream_t kernel_stream, cudaEvent_t kernel_event, Data<float,DefaultPolicy> & a, Data<float,DefaultPolicy> & b, Data<float,DefaultPolicy> & e) { if(1LL) { unsigned int gridDimX__ = 16LL; HMPP_CHECK_GRID_BOUNDARY(gridDimX__); unsigned int gridDimY__ = 64LL; HMPP_CHECK_GRID_BOUNDARY(gridDimY__); dim3 dim_grid(gridDimX__, gridDimY__); const unsigned int blockDimX__ = 32LL; const unsigned int blockDimY__ = 8LL; HMPP_CHECK_BLOCK_BOUNDARY(blockDimX__*blockDimY__); #if CUDA_VERSION >= 3020 a.makeStreamWait(kernel_stream); b.makeStreamWait(kernel_stream); e.makeStreamWait(kernel_stream); #else if ((hmppcg_status_ = CHECK_STATUS(cudaThreadSynchronize()))) return; #endif dim3 dim_block(blockDimX__, blockDimY__); hmpp_codelet__threeMMloopa_loop0_<blockDimX__, blockDimY__><<<dim_grid, dim_block, 0LL, kernel_stream>>>(a.getDeviceAddr(), b.getDeviceAddr(), e.getDeviceAddr()); if ((hmppcg_status_ = CHECK_STATUS(cudaGetLastError()))) return; #if CUDA_VERSION >= 3020 if((hmppcg_status_ = CHECK_STATUS(cudaEventRecord(kernel_event, kernel_stream)))) return; a.waitOnEvent(kernel_event); b.waitOnEvent(kernel_event); e.waitOnEvent(kernel_event); #else if ((hmppcg_status_ = CHECK_STATUS(cudaThreadSynchronize()))) return; #endif }; } template <const unsigned int blockDimX__, const unsigned int blockDimY__> __global__ void hmpp_codelet__threeMMloopb_loop0_( float * HMPPCG_RESTRICT c, float * HMPPCG_RESTRICT d, float * HMPPCG_RESTRICT f) { int32_t j_4; int32_t i_4; j_4 = (blockDimX__ * blockIdx.x + threadIdx.x); i_4 = (blockDimY__ * blockIdx.y + threadIdx.y); bool __hmppcg_guard = (!((j_4 <= 511) & (i_4 <= 511))); if(__hmppcg_guard) { goto __hmppcg_label3; }; f[(i_4 * 512) + j_4] = 0; { int32_t __hmppcg_end, k_4; for (k_4 = 0, __hmppcg_end = 511; k_4 <= __hmppcg_end; k_4 += 1) { f[(i_4 * 512) + j_4] = (f[(i_4 * 512) + j_4]) + ((c[(i_4 * 512) + k_4]) * (d[(k_4 * 512) + j_4])); } } __hmppcg_label3:; } void hmpp_codelet__threeMMloopb( int &hmppcg_status_, void * __h, const cudaDeviceProp &devProp, cudaStream_t kernel_stream, cudaEvent_t kernel_event, Data<float,DefaultPolicy> & c, Data<float,DefaultPolicy> & d, Data<float,DefaultPolicy> & f) { if(1LL) { unsigned int gridDimX__ = 16LL; HMPP_CHECK_GRID_BOUNDARY(gridDimX__); unsigned int gridDimY__ = 64LL; HMPP_CHECK_GRID_BOUNDARY(gridDimY__); dim3 dim_grid(gridDimX__, gridDimY__); const unsigned int blockDimX__ = 32LL; const unsigned int blockDimY__ = 8LL; HMPP_CHECK_BLOCK_BOUNDARY(blockDimX__*blockDimY__); #if CUDA_VERSION >= 3020 c.makeStreamWait(kernel_stream); d.makeStreamWait(kernel_stream); f.makeStreamWait(kernel_stream); #else if ((hmppcg_status_ = CHECK_STATUS(cudaThreadSynchronize()))) return; #endif dim3 dim_block(blockDimX__, blockDimY__); 
hmpp_codelet__threeMMloopb_loop0_<blockDimX__, blockDimY__><<<dim_grid, dim_block, 0LL, kernel_stream>>>(c.getDeviceAddr(), d.getDeviceAddr(), f.getDeviceAddr()); if ((hmppcg_status_ = CHECK_STATUS(cudaGetLastError()))) return; #if CUDA_VERSION >= 3020 if((hmppcg_status_ = CHECK_STATUS(cudaEventRecord(kernel_event, kernel_stream)))) return; c.waitOnEvent(kernel_event); d.waitOnEvent(kernel_event); f.waitOnEvent(kernel_event); #else if ((hmppcg_status_ = CHECK_STATUS(cudaThreadSynchronize()))) return; #endif }; } template <const unsigned int blockDimX__, const unsigned int blockDimY__> __global__ void hmpp_codelet__threeMMloopc_loop0_( float * HMPPCG_RESTRICT e_11, float * HMPPCG_RESTRICT f_11, float * HMPPCG_RESTRICT g) { int32_t j_5; int32_t i_5; j_5 = (blockDimX__ * blockIdx.x + threadIdx.x); i_5 = (blockDimY__ * blockIdx.y + threadIdx.y); bool __hmppcg_guard = (!((j_5 <= 511) & (i_5 <= 511))); if(__hmppcg_guard) { goto __hmppcg_label5; }; g[(i_5 * 512) + j_5] = 0; { int32_t __hmppcg_end, k_5; for (k_5 = 0, __hmppcg_end = 511; k_5 <= __hmppcg_end; k_5 += 1) { g[(i_5 * 512) + j_5] = (g[(i_5 * 512) + j_5]) + ((e_11[(i_5 * 512) + k_5]) * (f_11[(k_5 * 512) + j_5])); } } __hmppcg_label5:; } void hmpp_codelet__threeMMloopc( int &hmppcg_status_, void * __h, const cudaDeviceProp &devProp, cudaStream_t kernel_stream, cudaEvent_t kernel_event, Data<float,DefaultPolicy> & e_1, Data<float,DefaultPolicy> & f_1, Data<float,DefaultPolicy> & g) { if(1LL) { unsigned int gridDimX__ = 16LL; HMPP_CHECK_GRID_BOUNDARY(gridDimX__); unsigned int gridDimY__ = 64LL; HMPP_CHECK_GRID_BOUNDARY(gridDimY__); dim3 dim_grid(gridDimX__, gridDimY__); const unsigned int blockDimX__ = 32LL; const unsigned int blockDimY__ = 8LL; HMPP_CHECK_BLOCK_BOUNDARY(blockDimX__*blockDimY__); #if CUDA_VERSION >= 3020 e_1.makeStreamWait(kernel_stream); f_1.makeStreamWait(kernel_stream); g.makeStreamWait(kernel_stream); #else if ((hmppcg_status_ = CHECK_STATUS(cudaThreadSynchronize()))) return; #endif dim3 dim_block(blockDimX__, blockDimY__); hmpp_codelet__threeMMloopc_loop0_<blockDimX__, blockDimY__><<<dim_grid, dim_block, 0LL, kernel_stream>>>(e_1.getDeviceAddr(), f_1.getDeviceAddr(), g.getDeviceAddr()); if ((hmppcg_status_ = CHECK_STATUS(cudaGetLastError()))) return; #if CUDA_VERSION >= 3020 if((hmppcg_status_ = CHECK_STATUS(cudaEventRecord(kernel_event, kernel_stream)))) return; e_1.waitOnEvent(kernel_event); f_1.waitOnEvent(kernel_event); g.waitOnEvent(kernel_event); #else if ((hmppcg_status_ = CHECK_STATUS(cudaThreadSynchronize()))) return; #endif }; } // HMPP_API #ifdef __cplusplus #define HMPP_EXTERN extern "C" #else #define HMPP_EXTERN #endif #ifdef _WIN32 #define HMPP_EXPORT __declspec(dllexport) #define HMPP_INLINE __inline #else #define HMPP_EXPORT #define HMPP_INLINE inline #endif #define HMPP_API HMPP_EXTERN HMPP_EXPORT // HMPPCG_POP_HASH #define HMPPCG_POP_HASH(major,minor) (((major)<<16)|(minor)) // --------------------------------------------------------------------------- // HMPP handle // --------------------------------------------------------------------------- typedef struct hmpp_handle_struct { Data<float,DefaultPolicy> * __arg0; Data<float,DefaultPolicy> * __arg1; Data<float,DefaultPolicy> * __arg2; Data<float,DefaultPolicy> * __arg3; Data<float,DefaultPolicy> * __arg4; Data<float,DefaultPolicy> * __arg5; Data<float,DefaultPolicy> * __arg6; cudaDeviceProp devProp; cudaStream_t kernel_stream; cudaEvent_t kernel_event; std::map<std::string,UserData*> map_user_data; } hmpp_handle_t; // 
--------------------------------------------------------------------------- // hmpp_createInstance() // --------------------------------------------------------------------------- HMPP_API hmpp_handle_t * hmpp_createInstance() { hmpp_handle_t * __h = new hmpp_handle_t; if(!__h) return 0; if(CHECK_STATUS(cudaStreamCreate(&__h->kernel_stream)) != 0) return NULL; #if CUDA_VERSION >= 3020 if(CHECK_STATUS(cudaEventCreateWithFlags(&__h->kernel_event, cudaEventDisableTiming | cudaEventBlockingSync)) != 0) return NULL; #else if(CHECK_STATUS(cudaEventCreateWithFlags(&__h->kernel_event, cudaEventBlockingSync)) != 0) return NULL; #endif __h->__arg0 = NULL; __h->__arg1 = NULL; __h->__arg2 = NULL; __h->__arg3 = NULL; __h->__arg4 = NULL; __h->__arg5 = NULL; __h->__arg6 = NULL; int device; cudaGetDevice(&device); cudaGetDeviceProperties(&(__h->devProp), device); return __h; } // --------------------------------------------------------------------------- // hmpp_freeInstance() // --------------------------------------------------------------------------- HMPP_API int hmpp_freeInstance(hmpp_handle_t * __h) { delete __h->__arg0; delete __h->__arg1; delete __h->__arg2; delete __h->__arg3; delete __h->__arg4; delete __h->__arg5; delete __h->__arg6; cudaStreamDestroy(__h->kernel_stream); cudaEventDestroy(__h->kernel_event); __h->kernel_stream = 0; for(std::map<std::string,UserData*>::const_iterator it = __h->map_user_data.begin(); it != __h->map_user_data.end(); it++) { delete it->second; } delete(__h); return 0; } // --------------------------------------------------------------------------- // hmpp_allocateOnHWA() // --------------------------------------------------------------------------- HMPP_API int hmpp_allocateOnHWA(hmpp_handle_t * __h, int major, int minor, const size_t * size, size_t elsize, int dim) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,2): // e@hmpp_codelet__threeMMloopa case HMPPCG_POP_HASH(3,0): // e@hmpp_codelet__threeMMloopc { __h->__arg0 = new Data<float,DefaultPolicy>("__arg0", DEFAULT); return __h->__arg0->allocate2(dim, size); } case HMPPCG_POP_HASH(2,2): // f@hmpp_codelet__threeMMloopb case HMPPCG_POP_HASH(3,1): // f@hmpp_codelet__threeMMloopc { __h->__arg1 = new Data<float,DefaultPolicy>("__arg1", DEFAULT); return __h->__arg1->allocate2(dim, size); } case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__threeMMloopa { __h->__arg2 = new Data<float,DefaultPolicy>("__arg2", DEFAULT); return __h->__arg2->allocate2(dim, size); } case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__threeMMloopa { __h->__arg3 = new Data<float,DefaultPolicy>("__arg3", DEFAULT); return __h->__arg3->allocate2(dim, size); } case HMPPCG_POP_HASH(2,0): // c@hmpp_codelet__threeMMloopb { __h->__arg4 = new Data<float,DefaultPolicy>("__arg4", DEFAULT); return __h->__arg4->allocate2(dim, size); } case HMPPCG_POP_HASH(2,1): // d@hmpp_codelet__threeMMloopb { __h->__arg5 = new Data<float,DefaultPolicy>("__arg5", DEFAULT); return __h->__arg5->allocate2(dim, size); } case HMPPCG_POP_HASH(3,2): // g@hmpp_codelet__threeMMloopc { __h->__arg6 = new Data<float,DefaultPolicy>("__arg6", DEFAULT); return __h->__arg6->allocate2(dim, size); } default: return -1; } } HMPP_API int hmpp_allocateOutputOnHWA(hmpp_handle_t * __h, int major, int minor, const size_t * size, size_t elsize, int dim) { return hmpp_allocateOnHWA(__h, major, minor, size, elsize, dim); } HMPP_API int hmpp_allocateInputOnHWA(hmpp_handle_t * __h, int major, int minor, const size_t * size, size_t elsize, int dim) { return hmpp_allocateOnHWA(__h, major, minor, 
size, elsize, dim); } HMPP_API int hmpp_allocateInOutOnHWA(hmpp_handle_t * __h, int major, int minor, const size_t * size, size_t elsize, int dim) { return hmpp_allocateOnHWA(__h, major, minor, size, elsize, dim); } // --------------------------------------------------------------------------- // hmpp_readDataFromHWA() // --------------------------------------------------------------------------- HMPP_API int hmpp_readDataFromHWA(hmpp_handle_t * __h, int major, int minor, void * data, const size_t * size, size_t elsize, int dim, int async) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,2): // e@hmpp_codelet__threeMMloopa case HMPPCG_POP_HASH(3,0): // e@hmpp_codelet__threeMMloopc { return __h->__arg0->download(data,async!=0); } case HMPPCG_POP_HASH(2,2): // f@hmpp_codelet__threeMMloopb case HMPPCG_POP_HASH(3,1): // f@hmpp_codelet__threeMMloopc { return __h->__arg1->download(data,async!=0); } case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__threeMMloopa { return __h->__arg2->download(data,async!=0); } case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__threeMMloopa { return __h->__arg3->download(data,async!=0); } case HMPPCG_POP_HASH(2,0): // c@hmpp_codelet__threeMMloopb { return __h->__arg4->download(data,async!=0); } case HMPPCG_POP_HASH(2,1): // d@hmpp_codelet__threeMMloopb { return __h->__arg5->download(data,async!=0); } case HMPPCG_POP_HASH(3,2): // g@hmpp_codelet__threeMMloopc { return __h->__arg6->download(data,async!=0); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_writeDataToHWA() // --------------------------------------------------------------------------- HMPP_API int hmpp_writeDataToHWA(hmpp_handle_t * __h, int major, int minor, const void * data, const size_t * size, size_t elsize, int dim, int async) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,2): // e@hmpp_codelet__threeMMloopa case HMPPCG_POP_HASH(3,0): // e@hmpp_codelet__threeMMloopc { return __h->__arg0->upload(data,async!=0); } case HMPPCG_POP_HASH(2,2): // f@hmpp_codelet__threeMMloopb case HMPPCG_POP_HASH(3,1): // f@hmpp_codelet__threeMMloopc { return __h->__arg1->upload(data,async!=0); } case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__threeMMloopa { return __h->__arg2->upload(data,async!=0); } case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__threeMMloopa { return __h->__arg3->upload(data,async!=0); } case HMPPCG_POP_HASH(2,0): // c@hmpp_codelet__threeMMloopb { return __h->__arg4->upload(data,async!=0); } case HMPPCG_POP_HASH(2,1): // d@hmpp_codelet__threeMMloopb { return __h->__arg5->upload(data,async!=0); } case HMPPCG_POP_HASH(3,2): // g@hmpp_codelet__threeMMloopc { return __h->__arg6->upload(data,async!=0); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_readDataSectionFromHWA() // --------------------------------------------------------------------------- HMPP_API int hmpp_readDataSectionFromHWA(hmpp_handle_t * __h, int major, int minor, void * data, const __hmppcg_DataSection *section, const size_t * size, size_t elsize, int dim, int async) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,2): // e@hmpp_codelet__threeMMloopa case HMPPCG_POP_HASH(3,0): // e@hmpp_codelet__threeMMloopc { return __h->__arg0->downloadSection(data,section,async!=0); } case HMPPCG_POP_HASH(2,2): // f@hmpp_codelet__threeMMloopb case HMPPCG_POP_HASH(3,1): // f@hmpp_codelet__threeMMloopc { return __h->__arg1->downloadSection(data,section,async!=0); } case 
HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__threeMMloopa { return __h->__arg2->downloadSection(data,section,async!=0); } case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__threeMMloopa { return __h->__arg3->downloadSection(data,section,async!=0); } case HMPPCG_POP_HASH(2,0): // c@hmpp_codelet__threeMMloopb { return __h->__arg4->downloadSection(data,section,async!=0); } case HMPPCG_POP_HASH(2,1): // d@hmpp_codelet__threeMMloopb { return __h->__arg5->downloadSection(data,section,async!=0); } case HMPPCG_POP_HASH(3,2): // g@hmpp_codelet__threeMMloopc { return __h->__arg6->downloadSection(data,section,async!=0); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_writeDataSectionToHWA() // --------------------------------------------------------------------------- HMPP_API int hmpp_writeDataSectionToHWA(hmpp_handle_t * __h, int major, int minor, const void * data, const __hmppcg_DataSection *section, const size_t * size, size_t elsize, int dim, int async) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,2): // e@hmpp_codelet__threeMMloopa case HMPPCG_POP_HASH(3,0): // e@hmpp_codelet__threeMMloopc { return __h->__arg0->uploadSection(data,section,async!=0); } case HMPPCG_POP_HASH(2,2): // f@hmpp_codelet__threeMMloopb case HMPPCG_POP_HASH(3,1): // f@hmpp_codelet__threeMMloopc { return __h->__arg1->uploadSection(data,section,async!=0); } case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__threeMMloopa { return __h->__arg2->uploadSection(data,section,async!=0); } case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__threeMMloopa { return __h->__arg3->uploadSection(data,section,async!=0); } case HMPPCG_POP_HASH(2,0): // c@hmpp_codelet__threeMMloopb { return __h->__arg4->uploadSection(data,section,async!=0); } case HMPPCG_POP_HASH(2,1): // d@hmpp_codelet__threeMMloopb { return __h->__arg5->uploadSection(data,section,async!=0); } case HMPPCG_POP_HASH(3,2): // g@hmpp_codelet__threeMMloopc { return __h->__arg6->uploadSection(data,section,async!=0); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_waitForWriteTransfer() // --------------------------------------------------------------------------- HMPP_API int hmpp_waitForWriteTransfer(hmpp_handle_t * __h, int major, int minor) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,2): // e@hmpp_codelet__threeMMloopa case HMPPCG_POP_HASH(3,0): // e@hmpp_codelet__threeMMloopc { return __h->__arg0->waitTransfer(); } case HMPPCG_POP_HASH(2,2): // f@hmpp_codelet__threeMMloopb case HMPPCG_POP_HASH(3,1): // f@hmpp_codelet__threeMMloopc { return __h->__arg1->waitTransfer(); } case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__threeMMloopa { return __h->__arg2->waitTransfer(); } case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__threeMMloopa { return __h->__arg3->waitTransfer(); } case HMPPCG_POP_HASH(2,0): // c@hmpp_codelet__threeMMloopb { return __h->__arg4->waitTransfer(); } case HMPPCG_POP_HASH(2,1): // d@hmpp_codelet__threeMMloopb { return __h->__arg5->waitTransfer(); } case HMPPCG_POP_HASH(3,2): // g@hmpp_codelet__threeMMloopc { return __h->__arg6->waitTransfer(); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_waitForReadTransfer() // --------------------------------------------------------------------------- HMPP_API int hmpp_waitForReadTransfer(hmpp_handle_t * __h, int major, int minor) { switch(HMPPCG_POP_HASH(major,minor)) { case HMPPCG_POP_HASH(1,2): // 
e@hmpp_codelet__threeMMloopa case HMPPCG_POP_HASH(3,0): // e@hmpp_codelet__threeMMloopc { return __h->__arg0->waitTransfer(); } case HMPPCG_POP_HASH(2,2): // f@hmpp_codelet__threeMMloopb case HMPPCG_POP_HASH(3,1): // f@hmpp_codelet__threeMMloopc { return __h->__arg1->waitTransfer(); } case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__threeMMloopa { return __h->__arg2->waitTransfer(); } case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__threeMMloopa { return __h->__arg3->waitTransfer(); } case HMPPCG_POP_HASH(2,0): // c@hmpp_codelet__threeMMloopb { return __h->__arg4->waitTransfer(); } case HMPPCG_POP_HASH(2,1): // d@hmpp_codelet__threeMMloopb { return __h->__arg5->waitTransfer(); } case HMPPCG_POP_HASH(3,2): // g@hmpp_codelet__threeMMloopc { return __h->__arg6->waitTransfer(); } default: return -1; } } // --------------------------------------------------------------------------- // hmpp_codeletsAreReentrant() // --------------------------------------------------------------------------- HMPP_API int hmpp_codeletsAreReentrant() { return 0; } // --------------------------------------------------------------------------- // hmpp_start() // --------------------------------------------------------------------------- HMPP_API int hmpp_start(hmpp_handle_t * __h, int __id, int __async) { int status = 0; switch(__id) { case 1: // hmpp_codelet__threeMMloopa(__arg2,__arg3,__arg0) hmpp_codelet__threeMMloopa(status, __h, __h->devProp, __h->kernel_stream, __h->kernel_event, (*__h->__arg2), (*__h->__arg3), (*__h->__arg0)); return status; case 2: // hmpp_codelet__threeMMloopb(__arg4,__arg5,__arg1) hmpp_codelet__threeMMloopb(status, __h, __h->devProp, __h->kernel_stream, __h->kernel_event, (*__h->__arg4), (*__h->__arg5), (*__h->__arg1)); return status; case 3: // hmpp_codelet__threeMMloopc(__arg0,__arg1,__arg6) hmpp_codelet__threeMMloopc(status, __h, __h->devProp, __h->kernel_stream, __h->kernel_event, (*__h->__arg0), (*__h->__arg1), (*__h->__arg6)); return status; } return -1; } // --------------------------------------------------------------------------- // hmpp_wait() // --------------------------------------------------------------------------- HMPP_API int hmpp_wait(hmpp_handle_t * __h,int codelet_id) { return CHECK_STATUS(cudaStreamSynchronize(__h->kernel_stream)); } // --------------------------------------------------------------------------- // hmpp_version() // --------------------------------------------------------------------------- HMPP_API int hmpp_version() { #ifndef HMPP_RUNTIME_TARGET_VERSION #define HMPP_RUNTIME_TARGET_VERSION(major,minor)((major << 16) | (minor << 8)) #endif return HMPP_RUNTIME_TARGET_VERSION(2,5); } //
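// ---------------------------------------------------------------------------
// Editor's note (not generated code): sanity check of the gridification that
// all three codelets above share.  '#pragma hmppcg grid blocksize 32 X 8'
// produced:
//
//   dim_block = (32, 8)  -> 256 threads per block
//   dim_grid  = (16, 64) -> x: 16 * 32 = 512 columns (index j)
//                           y: 64 *  8 = 512 rows    (index i)
//
// Every thread therefore owns exactly one output element e[i][j] (resp.
// f[i][j], g[i][j]), and the '(j <= 511) & (i <= 511)' guard in the kernels
// never rejects a thread at this size; it only matters if the 512x512 shape
// stopped dividing evenly by the block dimensions.  The inner k loop stays
// sequential per thread, matching '#pragma hmppcg noParallel' in the
// original codelet source.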
f093b5fc27b8cd15d4db91ee05c7a87bcd4db909.hip
// !!! This is a file automatically generated by hipify!!!
#include "fast_pcl/ndt_gpu/MatrixDevice.h"
#include "fast_pcl/ndt_gpu/debug.h"

namespace gpu {

MatrixDevice::MatrixDevice(int rows, int cols)
{
  rows_ = rows;
  cols_ = cols;
  offset_ = 1;
  fr_ = true;
  checkCudaErrors(hipMalloc(&buffer_, sizeof(double) * rows_ * cols_ * offset_));
  checkCudaErrors(hipMemset(buffer_, 0, sizeof(double) * rows_ * cols_ * offset_));
  checkCudaErrors(hipDeviceSynchronize());
}

void MatrixDevice::memFree()
{
  if (fr_) {
    if (buffer_ != NULL)
      checkCudaErrors(hipFree(buffer_));
  }
}

SquareMatrixDevice::SquareMatrixDevice(int size) :
  MatrixDevice(size, size)
{
}

}
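// ---------------------------------------------------------------------------
// Editor's sketch (not part of the hipified file): minimal host-side use of
// MatrixDevice.  The constructor hipMalloc's and zero-fills
// rows * cols * offset doubles; note that nothing releases buffer_
// automatically -- memFree() must be called explicitly.
static void example_matrix_device()
{
  gpu::MatrixDevice m(6, 6);  // 6x6 matrix, zero-initialised on the device
  // ... hand m (or its device buffer) to kernels here ...
  m.memFree();                // manual release of the device allocation
}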
f093b5fc27b8cd15d4db91ee05c7a87bcd4db909.cu
#include "fast_pcl/ndt_gpu/MatrixDevice.h" #include "fast_pcl/ndt_gpu/debug.h" namespace gpu { MatrixDevice::MatrixDevice(int rows, int cols) { rows_ = rows; cols_ = cols; offset_ = 1; fr_ = true; checkCudaErrors(cudaMalloc(&buffer_, sizeof(double) * rows_ * cols_ * offset_)); checkCudaErrors(cudaMemset(buffer_, 0, sizeof(double) * rows_ * cols_ * offset_)); checkCudaErrors(cudaDeviceSynchronize()); } void MatrixDevice::memFree() { if (fr_) { if (buffer_ != NULL) checkCudaErrors(cudaFree(buffer_)); } } SquareMatrixDevice::SquareMatrixDevice(int size) : MatrixDevice(size, size) { } }
34e8a81cd460aa150d1c9b4c1b72e1ae865603d5.hip
// !!! This is a file automatically generated by hipify!!!
/* Jim - A small embeddable Tcl interpreter
 *
 * Copyright 2005 Salvatore Sanfilippo <[email protected]>
 * Copyright 2005 Clemens Hintze <[email protected]>
 * Copyright 2005 patthoyts - Pat Thoyts <[email protected]>
 * Copyright 2008,2009 oharboe - Øyvind Harboe - [email protected]
 * Copyright 2008 Andrew Lunn <[email protected]>
 * Copyright 2008 Duane Ellis <[email protected]>
 * Copyright 2008 Uwe Klein <[email protected]>
 * Copyright 2008 Steve Bennett <[email protected]>
 * Copyright 2009 Nico Coesel <[email protected]>
 * Copyright 2009 Zachary T Welch [email protected]
 * Copyright 2009 David Brownell
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE JIM TCL PROJECT ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * JIM TCL PROJECT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation
 * are those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the Jim Tcl Project.
 **/
#pragma region Preamble

#define JIM_OPTIMIZATION // comment to avoid optimizations and reduce size

//#include <stdiocu.h>
#include <stringcu.h>
//#include <stdargcu.h>
//#include <ctypecu.h>
//#include <limitscu.h>
#include <errnocu.h>
#include <stdlibcu.h>
#include <setjmpcu.h>
#include <assert.h>
#include <timecu.h>
#include "jim.h"
#include "jimautoconf.h"
#include "utf8.h"

#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#ifdef HAVE_BACKTRACE
#include <execinfo.h>
#endif
#ifdef HAVE_CRT_EXTERNS_H
#include <crt_externs.h>
#endif

// For INFINITY, even if math functions are not enabled
#include <math.h>

// We may decide to switch to using $[...] after all, so leave it as an option
/*#define EXPRSUGAR_BRACKET*/

// For the no-autoconf case
#ifndef TCL_LIBRARY
#define TCL_LIBRARY "."
#endif #ifndef TCL_PLATFORM_OS #define TCL_PLATFORM_OS "unknown" #endif #ifndef TCL_PLATFORM_PLATFORM #define TCL_PLATFORM_PLATFORM "unknown" #endif #ifndef TCL_PLATFORM_PATH_SEPARATOR #define TCL_PLATFORM_PATH_SEPARATOR ":" #endif // GPUEX@begin: turn these back off //#define DEBUG_SHOW_SCRIPT //#define DEBUG_SHOW_SCRIPT_TOKENS //#define DEBUG_SHOW_SUBST //#define DEBUG_SHOW_EXPR //#define DEBUG_SHOW_EXPR_TOKENS //#define JIM_DEBUG_GC // GPUEX@end #ifdef JIM_MAINTAINER #define JIM_DEBUG_COMMAND #define JIM_DEBUG_PANIC #endif // Enable this (in conjunction with valgrind) to help debug reference counting issues /*#define JIM_DISABLE_OBJECT_POOL*/ // Maximum size of an integer #define JIM_INTEGER_SPACE 24 __device__ const char *jim_tt_name(int type); #ifdef JIM_DEBUG_PANIC static __device__ void JimPanicDump(int condition, const char *fmt, ...); #define JimPanic JimPanicDump //(c, msg, ...) JimPanicDump(c, msg, __VA_ARGS__) #else #define JimPanic(c, msg, ...) #endif #pragma endregion // ----------------------------------------------------------------------------- // Global variables // ----------------------------------------------------------------------------- #pragma region Global variables // A shared empty string for the objects string representation. Jim_InvalidateStringRep knows about it and doesn't try to free it. __constant__ static char JimEmptyStringRep[] = ""; #pragma endregion // ----------------------------------------------------------------------------- // Required prototypes of not exported functions // ----------------------------------------------------------------------------- #pragma region Required prototypes of not exported functions static __device__ void JimFreeCallFrame(Jim_Interp *interp, Jim_CallFrame *cf, int action); static __device__ int ListSetIndex(Jim_Interp *interp, Jim_Obj *listPtr, int listindex, Jim_Obj *newObjPtr, int flags); static __device__ int JimDeleteLocalProcs(Jim_Interp *interp, Jim_Stack *localCommands); static __device__ Jim_Obj *JimExpandDictSugar(Jim_Interp *interp, Jim_Obj *objPtr); static __device__ void SetDictSubstFromAny(Jim_Interp *interp, Jim_Obj *objPtr); static __device__ Jim_Obj **JimDictPairs(Jim_Obj *dictPtr, int *len); static __device__ void JimSetFailedEnumResult(Jim_Interp *interp, const char *arg, const char *badtype, const char *prefix, const char *const *tablePtr, const char *name); static __device__ int JimCallProcedure(Jim_Interp *interp, Jim_Cmd *cmd, int argc, Jim_Obj *const *argv); static __device__ int JimGetWideNoErr(Jim_Interp *interp, Jim_Obj *objPtr, jim_wide* widePtr); static __device__ int JimSign(jim_wide w); static __device__ int JimValidName(Jim_Interp *interp, const char *type, Jim_Obj *nameObjPtr); static __device__ void JimPrngSeed(Jim_Interp *interp, unsigned char *seed, int seedLen); static __device__ void JimRandomBytes(Jim_Interp *interp, void *dest, unsigned int len); // Fast access to the int (wide) value of an object which is known to be of int type #define JimWideValue(objPtr) (objPtr)->internalRep.wideValue #define JimObjTypeName(O) ((O)->typePtr ? (O)->typePtr->name : "none") static __device__ int utf8_tounicode_case(const char *s, int *uc, int upper) { int l = utf8_tounicode(s, uc); if (upper) { *uc = utf8_upper(*uc); } return l; } // These can be used in addition to JIM_CASESENS/JIM_NOCASE #define JIM_CHARSET_SCAN 2 #define JIM_CHARSET_GLOB 0 // pattern points to a string like "[^a-z\ub5]" // The pattern may contain trailing chars, which are ignored. 
// The pattern is matched against unicode char 'c'. // If (flags & JIM_NOCASE), case is ignored when matching. // If (flags & JIM_CHARSET_SCAN), the considers ^ and ] special at the start of the charset, per scan, rather than glob/string match. // If the unicode char 'c' matches that set, returns a pointer to the ']' character, or the null character if the ']' is missing. // Returns NULL on no match. static __device__ const char *JimCharsetMatch(const char *pattern, int c, int flags) { int not_ = 0; int pchar; int match = 0; int nocase = 0; if (flags & JIM_NOCASE) { nocase++; c = utf8_upper(c); } if (flags & JIM_CHARSET_SCAN) { if (*pattern == '^') { not_++; pattern++; } // Special case. If the first char is ']', it is part of the set if (*pattern == ']') goto first; } while (*pattern && *pattern != ']') { // Exact match if (pattern[0] == '\\') { first: pattern += utf8_tounicode_case(pattern, &pchar, nocase); } else { // Is this a range? a-z int start; int end; pattern += utf8_tounicode_case(pattern, &start, nocase); if (pattern[0] == '-' && pattern[1]) { // skip '-' pattern += utf8_tounicode(pattern, &pchar); pattern += utf8_tounicode_case(pattern, &end, nocase); // Handle reversed range too if ((c >= start && c <= end) || (c >= end && c <= start)) match = 1; continue; } pchar = start; } if (pchar == c) match = 1; } if (not_) match = !match; return (match ? pattern : nullptr); } // Glob-style pattern matching. // Note: string *must* be valid UTF-8 sequences static __device__ int JimGlobMatch(const char *pattern, const char *string, int nocase) { int c; int pchar; while (*pattern) { switch (pattern[0]) { case '*': while (pattern[1] == '*') pattern++; pattern++; if (!pattern[0]) return 1; // match while (*string) { // Recursive call - Does the remaining pattern match anywhere? if (JimGlobMatch(pattern, string, nocase)) return 1; // match string += utf8_tounicode(string, &c); } return 0; // no match case '?': string += utf8_tounicode(string, &c); break; case '[': { string += utf8_tounicode(string, &c); pattern = JimCharsetMatch(pattern + 1, c, nocase ? JIM_NOCASE : 0); if (!pattern) return 0; if (!*pattern) continue; // Ran out of pattern (no ']') break; } case '\\': if (pattern[1]) pattern++; // fall through default: string += utf8_tounicode_case(string, &c, nocase); utf8_tounicode_case(pattern, &pchar, nocase); if (pchar != c) return 0; break; } pattern += utf8_tounicode_case(pattern, &pchar, nocase); if (!*string) { while (*pattern == '*') pattern++; break; } } return (!*pattern && !*string ? 1 : 0); } // string comparison. Works on binary data. // Returns -1, 0 or 1 // Note that the lengths are byte lengths, not char lengths. static __device__ int JimStringCompare(const char *s1, int l1, const char *s2, int l2) { if (l1 < l2) return (memcmp(s1, s2, l1) <= 0 ? -1 : 1); else if (l2 < l1) return (memcmp(s1, s2, l2) >= 0 ? 1 : -1); else return JimSign(memcmp(s1, s2, l1)); } // Compare null terminated strings, up to a maximum of 'maxchars' characters, (or end of string if 'maxchars' is -1). // Returns -1, 0, 1 for s1 < s2, s1 == s2, s1 > s2 respectively. // Note: does not support embedded nulls. 
static __device__ int JimStringCompareLen(const char *s1, const char *s2, int maxchars, int nocase) { while (*s1 && *s2 && maxchars) { int c1, c2; s1 += utf8_tounicode_case(s1, &c1, nocase); s2 += utf8_tounicode_case(s2, &c2, nocase); if (c1 != c2) return JimSign(c1 - c2); maxchars--; } if (!maxchars) return 0; // One string or both terminated if (*s1) return 1; if (*s2) return -1; return 0; } // Search 's1' inside 's2', starting to search from char 'index' of 's2'. The index of the first occurrence of s1 in s2 is returned. // If s1 is not found inside s2, -1 is returned. static __device__ int JimStringFirst(const char *s1, int l1, const char *s2, int l2, int idx) { if (!l1 || !l2 || l1 > l2) return -1; if (idx < 0) idx = 0; s2 += utf8_index(s2, idx); int l1bytelen = utf8_index(s1, l1); for (int i = idx; i <= l2 - l1; i++) { if (!memcmp(s2, s1, l1bytelen)) return i; int c; UNUSED_SYMBOL(c); s2 += utf8_tounicode(s2, &c); } return -1; } // Note: Lengths and return value are in bytes, not chars. static __device__ int JimStringLast(const char *s1, int l1, const char *s2, int l2) { if (!l1 || !l2 || l1 > l2) return -1; // Now search for the needle for (const char *p = s2 + l2 - 1; p != s2 - 1; p--) if (*p == *s1 && !memcmp(s1, p, l1)) return (int)(p - s2); return -1; } #ifdef JIM_UTF8 // Note: Lengths and return value are in chars. static __device__ int JimStringLastUtf8(const char *s1, int l1, const char *s2, int l2) { int n = JimStringLast(s1, utf8_index(s1, l1), s2, utf8_index(s2, l2)); if (n > 0) n = utf8_strlen(s2, n); return n; } #endif // After an strtol()/strtod()-like conversion, check whether something was converted and that the only thing left is white space. // Returns JIM_OK or JIM_ERROR. static __device__ int JimCheckConversion(const char *str, const char *endptr) { if (str[0] == '\0' || str == endptr) return JIM_ERROR; if (endptr[0] != '\0') { while (*endptr) { if (!isspace((unsigned char)*endptr)) return JIM_ERROR; endptr++; } } return JIM_OK; } // Parses the front of a number to determine it's sign and base // Returns the index to start parsing according to the given base static __device__ int JimNumberBase(const char *str, int *base, int *sign) { int i = 0; *base = 10; while (isspace(str[i])) i++; if (str[i] == '-') { *sign = -1; i++; } else { if (str[i] == '+') { i++; } *sign = 1; } if (str[i] != '0') return 0; // base 10 // We have 0<x>, so see if we can convert it switch (str[i + 1]) { case 'x': case 'X': *base = 16; break; case 'o': case 'O': *base = 8; break; case 'b': case 'B': *base = 2; break; default: return 0; } i += 2; // Ensure that (e.g.) 0x-5 fails to parse if (str[i] != '-' && str[i] != '+' && !isspace(str[i])) // Parse according to this base return i; // Parse as base 10 *base = 10; return 0; } // Converts a number as per strtol(..., 0) except leading zeros do *not* imply octal. Instead, decimal is assumed unless the number begins with 0x, 0o or 0b static __device__ long jim_strtol(const char *str, char **endptr) { int sign; int base; int i = JimNumberBase(str, &base, &sign); if (base != 10) { long value = strtol(str + i, endptr, base); if (endptr == NULL || *endptr != str + i) return value * sign; } // Can just do a regular base-10 conversion return strtol(str, endptr, 10); } // Converts a number as per strtoull(..., 0) except leading zeros do *not* imply octal. 
// Converts a number as per strtoull(..., 0) except leading zeros do *not* imply octal. Instead, decimal is assumed unless the number begins with 0x, 0o or 0b
static __device__ jim_wide jim_strtoull(const char *str, char **endptr) { if (!strcmp(str, "true")) { *endptr = (char *)(str+4); return 1; } if (!strcmp(str, "false")) { *endptr = (char *)(str+5); return 0; } #ifdef HAVE_LONG_LONG int sign; int base; int i = JimNumberBase(str, &base, &sign); if (base != 10) { jim_wide value = strtoull(str + i, endptr, base); if (endptr == NULL || *endptr != str + i) return value * sign; } // Can just do a regular base-10 conversion return strtoull(str, endptr, 10); #else return (unsigned long)jim_strtol(str, endptr); #endif } __device__ int Jim_StringToWide(const char *str, jim_wide *widePtr, int base) { char *endptr; *widePtr = (base ? strtoull(str, &endptr, base) : jim_strtoull(str, &endptr)); return JimCheckConversion(str, endptr); } __device__ int Jim_StringToDouble(const char *str, double *doublePtr) { char *endptr; // Callers can check for underflow via ERANGE errno = 0; *doublePtr = strtod(str, &endptr); return JimCheckConversion(str, endptr); } static __device__ jim_wide JimPowWide(jim_wide b, jim_wide e) { jim_wide i, res = 1; if ((b == 0 && e != 0) || e < 0) return 0; for (i = 0; i < e; i++) res *= b; return res; } #pragma endregion // ----------------------------------------------------------------------------- // Special functions // ----------------------------------------------------------------------------- #pragma region Special functions #ifdef JIM_DEBUG_PANIC static __device__ void JimPanicDump(int condition, const char *fmt, ...) { if (!condition) return; va_list va; va_start(va, fmt); fprintf_(stderr, "\nJIM INTERPRETER PANIC: "); vfprintf(stderr, fmt, va); fprintf_(stderr, "\n\n"); #ifdef HAVE_BACKTRACE { void *array[40]; int size, i; char **strings; size = backtrace(array, 40); strings = backtrace_symbols(array, size); for (i = 0; i < size; i++) fprintf(stderr, "[backtrace] %s\n", strings[i]); fprintf(stderr, "[backtrace] Include the above lines and the output\n"); fprintf(stderr, "[backtrace] of 'nm <executable>' in the bug report.\n"); } #endif va_end(va); exit(1); } #endif #pragma endregion // ----------------------------------------------------------------------------- // Memory allocation // ----------------------------------------------------------------------------- #pragma region Memory allocation __device__ void *Jim_Alloc(int size) { return (size ?
malloc(size) : nullptr); } __device__ void Jim_Free(void *ptr) { free(ptr); } __device__ void *Jim_Realloc(void *ptr, int size) { return realloc(ptr, size); } __device__ char *Jim_StrDup(const char *s) { return strdup(s); } __device__ char *Jim_StrDupLen(const char *s, int l) { char *copy = (char *)Jim_Alloc(l + 1); memcpy(copy, s, l + 1); copy[l] = 0; // Just to be sure, original could be substring return copy; } #pragma endregion // ----------------------------------------------------------------------------- // Time related functions // ----------------------------------------------------------------------------- #pragma region Time related functions // Returns current time in microseconds static __device__ jim_wide JimClock() { struct timeval tv; gettimeofday(&tv, NULL); return (jim_wide)(tv.tv_sec * 1000000 + tv.tv_usec); } #pragma endregion // ----------------------------------------------------------------------------- // Hash Tables // ----------------------------------------------------------------------------- #pragma region Hash Tables // -------------------------- private prototypes ---------------------------- static __device__ void JimExpandHashTableIfNeeded(Jim_HashTable *ht); static __device__ unsigned int JimHashTableNextPower(unsigned int size); static __device__ Jim_HashEntry *JimInsertHashEntry(Jim_HashTable *ht, const void *key, int replace); // -------------------------- hash functions -------------------------------- // Thomas Wang's 32 bit Mix Function __device__ unsigned int Jim_IntHashFunction(unsigned int key) { key += ~(key << 15); key ^= (key >> 10); key += (key << 3); key ^= (key >> 6); key += ~(key << 11); key ^= (key >> 16); return key; } // Generic hash function (multiply the running hash by 9 and add each byte, as Tcl does) __device__ unsigned int Jim_GenHashFunction(const unsigned char *buf, int len) { unsigned int h = 0; while (len--) h += (h << 3) + *buf++; return h; } // ----------------------------- API implementation ------------------------- // Reset a hashtable already initialized static __device__ void JimResetHashTable(Jim_HashTable *ht) { ht->table = NULL; ht->size = 0; ht->sizemask = 0; ht->used = 0; ht->collisions = 0; #ifdef JIM_RANDOMISE_HASH // This is initialised to a random value to avoid a hash collision attack.
// See: n.runs-SA-2011.004
ht->uniq = (rand() ^ time(NULL) ^ clock()); #else ht->uniq = 0; #endif } static __device__ void JimInitHashTableIterator(Jim_HashTable *ht, Jim_HashTableIterator *iter) { iter->ht = ht; iter->index = -1; iter->entry = NULL; iter->nextEntry = NULL; } // Initialize the hash table __device__ int Jim_InitHashTable(Jim_HashTable *ht, const Jim_HashTableType *type, void *privDataPtr) { JimResetHashTable(ht); ht->type = type; ht->privdata = privDataPtr; return JIM_OK; } // Resize the table to the minimal size that contains all the elements, but with the invariant of a USER/BUCKETS ratio near to <= 1 __device__ void Jim_ResizeHashTable(Jim_HashTable *ht) { int minimal = ht->used; if (minimal < JIM_HT_INITIAL_SIZE) minimal = JIM_HT_INITIAL_SIZE; Jim_ExpandHashTable(ht, minimal); } // Expand or create the hashtable __device__ void Jim_ExpandHashTable(Jim_HashTable *ht, unsigned int size) { Jim_HashTable n; // the new hashtable unsigned int realsize = JimHashTableNextPower(size), i; // the size is invalid if it is smaller than the number of elements already inside the hashtable if (size <= ht->used) return; Jim_InitHashTable(&n, ht->type, ht->privdata); n.size = realsize; n.sizemask = realsize - 1; n.table = (Jim_HashEntry **)Jim_Alloc(realsize * sizeof(Jim_HashEntry *)); // Keep the same 'uniq' as the original n.uniq = ht->uniq; // Initialize all the pointers to NULL memset(n.table, 0, realsize * sizeof(Jim_HashEntry *)); // Copy all the elements from the old to the new table: note that if the old hash table is empty ht->used is zero, so Jim_ExpandHashTable just creates an empty hash table. n.used = ht->used; for (i = 0; ht->used > 0; i++) { Jim_HashEntry *he, *nextHe; if (ht->table[i] == NULL) continue; // For each hash entry on this slot... he = ht->table[i]; while (he) { unsigned int h; nextHe = he->next; // Get the new element index h = Jim_HashKey(ht, he->key) & n.sizemask; he->next = n.table[h]; n.table[h] = he; ht->used--; // Pass to the next element he = nextHe; } } assert(ht->used == 0); Jim_Free(ht->table); // Remap the new hashtable in the old *ht = n; } // Add an element to the target hash table __device__ int Jim_AddHashEntry(Jim_HashTable *ht, const void *key, void *val) { Jim_HashEntry *entry; // Get a new entry, or NULL if the key already exists. entry = JimInsertHashEntry(ht, key, 0); if (entry == NULL) return JIM_ERROR; // Set the hash entry fields. Jim_SetHashKey(ht, entry, key); Jim_SetHashVal(ht, entry, val); return JIM_OK; } // Add an element, discarding the old if the key already exists __device__ int Jim_ReplaceHashEntry(Jim_HashTable *ht, const void *key, void *val) { int existed; Jim_HashEntry *entry; // Get a new entry, or the existing entry if the key is already present. entry = JimInsertHashEntry(ht, key, 1); if (entry->key) { // It already exists, so only replace the value. Note if both a destructor and a duplicate function exist, need to dup before destroy:
// perhaps they are the same reference counted object
if (ht->type->valDestructor && ht->type->valDup) { void *newval = ht->type->valDup(ht->privdata, val); ht->type->valDestructor(ht->privdata, entry->u.val); entry->u.val = newval; } else { Jim_FreeEntryVal(ht, entry); Jim_SetHashVal(ht, entry, val); } existed = 1; } else { // Doesn't exist, so set the key Jim_SetHashKey(ht, entry, key); Jim_SetHashVal(ht, entry, val); existed = 0; } return existed; } // Search and remove an element __device__ int Jim_DeleteHashEntry(Jim_HashTable *ht, const void *key) { unsigned int h; Jim_HashEntry *he, *prevHe; if (ht->used == 0) return JIM_ERROR; h = Jim_HashKey(ht, key) & ht->sizemask; he = ht->table[h]; prevHe = NULL; while (he) { if (Jim_CompareHashKeys(ht, key, he->key)) { // Unlink the element from the list if (prevHe) prevHe->next = he->next; else ht->table[h] = he->next; Jim_FreeEntryKey(ht, he); Jim_FreeEntryVal(ht, he); Jim_Free(he); ht->used--; return JIM_OK; } prevHe = he; he = he->next; } return JIM_ERROR; // not found } // Destroy an entire hash table and leave it ready for reuse __device__ int Jim_FreeHashTable(Jim_HashTable *ht) { unsigned int i; // Free all the elements for (i = 0; ht->used > 0; i++) { Jim_HashEntry *he, *nextHe; if ((he = ht->table[i]) == NULL) continue; while (he) { nextHe = he->next; Jim_FreeEntryKey(ht, he); Jim_FreeEntryVal(ht, he); Jim_Free(he); ht->used--; he = nextHe; } } // Free the table and the allocated cache structure Jim_Free(ht->table); // Re-initialize the table JimResetHashTable(ht); return JIM_OK; // never fails } __device__ Jim_HashEntry *Jim_FindHashEntry(Jim_HashTable *ht, const void *key) { Jim_HashEntry *he; unsigned int h; if (ht->used == 0) return NULL; h = Jim_HashKey(ht, key) & ht->sizemask; he = ht->table[h]; while (he) { if (Jim_CompareHashKeys(ht, key, he->key)) return he; he = he->next; } return NULL; } __device__ Jim_HashTableIterator *Jim_GetHashTableIterator(Jim_HashTable *ht) { Jim_HashTableIterator *iter = (Jim_HashTableIterator *)Jim_Alloc(sizeof(*iter)); JimInitHashTableIterator(ht, iter); return iter; } __device__ Jim_HashEntry *Jim_NextHashEntry(Jim_HashTableIterator *iter) { while (1) { if (iter->entry == NULL) { iter->index++; if (iter->index >= (signed)iter->ht->size) break; iter->entry = iter->ht->table[iter->index]; } else iter->entry = iter->nextEntry; if (iter->entry) { // We need to save the 'next' here, the iterator user may delete the entry we are returning. iter->nextEntry = iter->entry->next; return iter->entry; } } return NULL; } // ------------------------- private functions ------------------------------ // Expand the hash table if needed static __device__ void JimExpandHashTableIfNeeded(Jim_HashTable *ht) { // If the hash table is empty expand it to the initial size, if the table is "full" double its size. if (ht->size == 0) Jim_ExpandHashTable(ht, JIM_HT_INITIAL_SIZE); if (ht->size == ht->used) Jim_ExpandHashTable(ht, ht->size * 2); } // Our hash table capacity is a power of two static __device__ unsigned int JimHashTableNextPower(unsigned int size) { unsigned int i = JIM_HT_INITIAL_SIZE; if (size >= 2147483648U) return 2147483648U; while (1) { if (i >= size) return i; i *= 2; } } // Returns a free hash entry that can be populated with the given 'key'. // If the key already exists, NULL is returned (or the existing entry if 'replace' is set).
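// For illustration, a compiled-out sketch of the public hash table API that this helper backs. The function name is hypothetical; the noted return values follow the code above (JimPackageHashTableType is declared just below).
#if 0
static __device__ void JimHashTableExample() {
    Jim_HashTable ht;
    Jim_InitHashTable(&ht, &JimPackageHashTableType, NULL); // keys are duped strings
    Jim_AddHashEntry(&ht, "greeting", (void *)"hello");     // JIM_OK: new key
    Jim_AddHashEntry(&ht, "greeting", (void *)"bye");       // JIM_ERROR: key exists
    Jim_ReplaceHashEntry(&ht, "greeting", (void *)"bye");   // returns 1: value replaced
    Jim_HashEntry *he = Jim_FindHashEntry(&ht, "greeting"); // non-NULL: holds "bye"
    Jim_DeleteHashEntry(&ht, "greeting");                   // JIM_OK: unlinked and freed
    (void)he;
    Jim_FreeHashTable(&ht);                                 // table is ready for reuse
}
#endif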
static __device__ Jim_HashEntry *JimInsertHashEntry(Jim_HashTable *ht, const void *key, int replace) { // Expand the hashtable if needed JimExpandHashTableIfNeeded(ht); // Compute the key hash value unsigned int h = Jim_HashKey(ht, key) & ht->sizemask; // Search if this slot does not already contain the given key Jim_HashEntry *he = ht->table[h]; while (he) { if (Jim_CompareHashKeys(ht, key, he->key)) return (replace ? he : NULL); he = he->next; } // Allocates the memory and stores key he = (Jim_HashEntry *)Jim_Alloc(sizeof(*he)); he->next = ht->table[h]; ht->table[h] = he; ht->used++; he->key = NULL; return he; } // ----------------------- StringCopy Hash Table Type ------------------------ static __device__ unsigned int JimStringCopyHTHashFunction(const void *key) { return Jim_GenHashFunction((const unsigned char *)key, (int)strlen((const char *)key)); } static __device__ void *JimStringCopyHTDup(void *privdata, const void *key) { return Jim_StrDup((const char *)key); } static __device__ int JimStringCopyHTKeyCompare(void *privdata, const void *key1, const void *key2) { return !strcmp((const char *)key1, (const char *)key2); } static __device__ void JimStringCopyHTKeyDestructor(void *privdata, void *key) { Jim_Free(key); } static __device__ const Jim_HashTableType JimPackageHashTableType = { JimStringCopyHTHashFunction, // hash function JimStringCopyHTDup, // key dup NULL, // val dup JimStringCopyHTKeyCompare, // key compare JimStringCopyHTKeyDestructor, // key destructor NULL // val destructor }; typedef struct AssocDataValue { Jim_InterpDeleteProc *delProc; void *data; } AssocDataValue; static __device__ void JimAssocDataHashTableValueDestructor(void *privdata, void *data) { AssocDataValue *assocPtr = (AssocDataValue *)data; if (assocPtr->delProc != NULL) assocPtr->delProc((Jim_Interp *)privdata, assocPtr->data); Jim_Free(data); } __constant__ static const Jim_HashTableType JimAssocDataHashTableType = { JimStringCopyHTHashFunction, // hash function JimStringCopyHTDup, // key dup NULL, // val dup JimStringCopyHTKeyCompare, // key compare JimStringCopyHTKeyDestructor, // key destructor JimAssocDataHashTableValueDestructor // val destructor }; #pragma endregion // ----------------------------------------------------------------------------- // Stack - This is a simple generic stack implementation. It is used for example in the 'expr' expression compiler. // ----------------------------------------------------------------------------- #pragma region Stack __device__ void Jim_InitStack(Jim_Stack *stack) { stack->len = 0; stack->maxlen = 0; stack->vector = NULL; } __device__ void Jim_FreeStack(Jim_Stack *stack) { Jim_Free(stack->vector); } __device__ int Jim_StackLen(Jim_Stack *stack) { return stack->len; } __device__ void Jim_StackPush(Jim_Stack *stack, void *element) { int neededLen = stack->len + 1; if (neededLen > stack->maxlen) { stack->maxlen = (neededLen < 20 ? 
20 : neededLen * 2); stack->vector = (void **)Jim_Realloc(stack->vector, sizeof(void *) * stack->maxlen); } stack->vector[stack->len] = element; stack->len++; } __device__ void *Jim_StackPop(Jim_Stack *stack) { if (stack->len == 0) return NULL; stack->len--; return stack->vector[stack->len]; } __device__ void *Jim_StackPeek(Jim_Stack *stack) { if (stack->len == 0) return NULL; return stack->vector[stack->len - 1]; } __device__ void Jim_FreeStackElements(Jim_Stack *stack, void (*freeFunc)(void*ptr)) { for (int i = 0; i < stack->len; i++) freeFunc(stack->vector[i]); } #pragma endregion // ----------------------------------------------------------------------------- // Tcl Parser // ----------------------------------------------------------------------------- #pragma region Tcl Parser // Token types #define JIM_TT_NONE 0 // No token returned #define JIM_TT_STR 1 // simple string #define JIM_TT_ESC 2 // string that needs escape chars conversion #define JIM_TT_VAR 3 // var substitution #define JIM_TT_DICTSUGAR 4 // Syntax sugar for [dict get], $foo(bar) #define JIM_TT_CMD 5 // command substitution // Note: Keep these three together for TOKEN_IS_SEP() #define JIM_TT_SEP 6 // word separator (white space) #define JIM_TT_EOL 7 // line separator #define JIM_TT_EOF 8 // end of script #define JIM_TT_LINE 9 // special 'start-of-line' token. arg is # of arguments to the command. -ve if {*} #define JIM_TT_WORD 10 // special 'start-of-word' token. arg is # of tokens to combine. -ve if {*} // Additional token types needed for expressions #define JIM_TT_SUBEXPR_START 11 #define JIM_TT_SUBEXPR_END 12 #define JIM_TT_SUBEXPR_COMMA 13 #define JIM_TT_EXPR_INT 14 #define JIM_TT_EXPR_DOUBLE 15 #define JIM_TT_EXPRSUGAR 16 // $(expression) // Operator token types start here #define JIM_TT_EXPR_OP 20 #define TOKEN_IS_SEP(type) (type >= JIM_TT_SEP && type <= JIM_TT_EOF) // Parser states #define JIM_PS_DEF 0 // Default state #define JIM_PS_QUOTE 1 // Inside "" #define JIM_PS_DICTSUGAR 2 // Tokenising abc(def) into 4 separate tokens // Results of missing quotes, braces, etc. from parsing. struct JimParseMissing { int ch; // At end of parse, ' ' if complete, or '{', '[', '"' or '\\' if incomplete int line; // Line number starting the missing token }; // Parser context structure. The same context is used both to parse Tcl scripts and lists. struct JimParserCtx { const char *p; // Pointer to the point of the program we are parsing int len; // Remaining length int linenr; // Current line number const char *tstart; const char *tend; // Returned token is at tstart-tend in 'prg'. int tline; // Line number of the returned token int tt; // Token type int eof; // Non zero if EOF condition is true. int state; // Parser state int comment; // Non zero if the next chars may be a comment. struct JimParseMissing missing; // Details of any missing quotes, etc.
}; static __device__ int JimParseScript(struct JimParserCtx *pc); static __device__ int JimParseSep(struct JimParserCtx *pc); static __device__ int JimParseEol(struct JimParserCtx *pc); static __device__ int JimParseCmd(struct JimParserCtx *pc); static __device__ int JimParseQuote(struct JimParserCtx *pc); static __device__ int JimParseVar(struct JimParserCtx *pc); static __device__ int JimParseBrace(struct JimParserCtx *pc); static __device__ int JimParseStr(struct JimParserCtx *pc); static __device__ int JimParseComment(struct JimParserCtx *pc); static __device__ void JimParseSubCmd(struct JimParserCtx *pc); static __device__ int JimParseSubQuote(struct JimParserCtx *pc); static __device__ Jim_Obj *JimParserGetTokenObj(Jim_Interp *interp, struct JimParserCtx *pc); // Initialize a parser context. 'prg' is a pointer to the program text, linenr is the line number of the first line contained in the program. static __device__ void JimParserInit(struct JimParserCtx *pc, const char *prg, int len, int linenr) { pc->p = prg; pc->len = len; pc->tstart = NULL; pc->tend = NULL; pc->tline = 0; pc->tt = JIM_TT_NONE; pc->eof = 0; pc->state = JIM_PS_DEF; pc->linenr = linenr; pc->comment = 1; pc->missing.ch = ' '; pc->missing.line = linenr; } static __device__ int JimParseScript(struct JimParserCtx *pc) { while (1) { // the while is used to reiterate with continue if needed if (!pc->len) { pc->tstart = pc->p; pc->tend = pc->p - 1; pc->tline = pc->linenr; pc->tt = JIM_TT_EOL; pc->eof = 1; return JIM_OK; } switch (*(pc->p)) { case '\\': if (*(pc->p + 1) == '\n' && pc->state == JIM_PS_DEF) return JimParseSep(pc); pc->comment = 0; return JimParseStr(pc); case ' ': case '\t': case '\r': case '\f': if (pc->state == JIM_PS_DEF) return JimParseSep(pc); pc->comment = 0; return JimParseStr(pc); case '\n': case ';': pc->comment = 1; if (pc->state == JIM_PS_DEF) return JimParseEol(pc); return JimParseStr(pc); case '[': pc->comment = 0; return JimParseCmd(pc); case '$': pc->comment = 0; if (JimParseVar(pc) == JIM_ERROR) { // An orphan $. Create as a separate token pc->tstart = pc->tend = pc->p++; pc->len--; pc->tt = JIM_TT_ESC; } return JIM_OK; case '#': if (pc->comment) { JimParseComment(pc); continue; } return JimParseStr(pc); default: pc->comment = 0; return JimParseStr(pc); } //return JIM_OK; // unreached } } static __device__ int JimParseSep(struct JimParserCtx *pc) { pc->tstart = pc->p; pc->tline = pc->linenr; while (isspace(*pc->p) || (*pc->p == '\\' && *(pc->p + 1) == '\n')) { if (*pc->p == '\n') break; if (*pc->p == '\\') { pc->p++; pc->len--; pc->linenr++; } pc->p++; pc->len--; } pc->tend = pc->p - 1; pc->tt = JIM_TT_SEP; return JIM_OK; } static __device__ int JimParseEol(struct JimParserCtx *pc) { pc->tstart = pc->p; pc->tline = pc->linenr; while (isspace(*pc->p) || *pc->p == ';') { if (*pc->p == '\n') pc->linenr++; pc->p++; pc->len--; } pc->tend = pc->p - 1; pc->tt = JIM_TT_EOL; return JIM_OK; } // Here are the rules for parsing: // {braced expression} // - Count open and closing braces // - Backslash escapes meaning of braces // // "quoted expression" // - First double quote at start of word terminates the expression // - Backslash escapes quote and bracket // - [commands brackets] are counted/nested // - command rules apply within [brackets], not quoting rules (i.e. 
// quotes have their own rules) // // [command expression] // - Count open and closing brackets // - Backslash escapes quote, bracket and brace // - [commands brackets] are counted/nested // - "quoted expressions" are parsed according to quoting rules // - {braced expressions} are parsed according to brace rules // // For everything, backslash escapes the next char, newline increments current line // Parses a braced expression starting at pc->p. // Positions the parser at the end of the braced expression, sets pc->tend and possibly pc->missing. static __device__ void JimParseSubBrace(struct JimParserCtx *pc) { int level = 1; // Skip the brace pc->p++; pc->len--; while (pc->len) { switch (*pc->p) { case '\\': if (pc->len > 1) { if (*++pc->p == '\n') pc->linenr++; pc->len--; } break; case '{': level++; break; case '}': if (--level == 0) { pc->tend = pc->p - 1; pc->p++; pc->len--; return; } break; case '\n': pc->linenr++; break; } pc->p++; pc->len--; } pc->missing.ch = '{'; pc->missing.line = pc->tline; pc->tend = pc->p - 1; } // Parses a quoted expression starting at pc->p. // Positions the parser at the end of the quoted expression, sets pc->tend and possibly pc->missing. // Returns the type of the token of the string, either JIM_TT_ESC (if it contains values which need to be [subst]ed) or JIM_TT_STR. static __device__ int JimParseSubQuote(struct JimParserCtx *pc) { int tt = JIM_TT_STR; int line = pc->tline; // Skip the quote pc->p++; pc->len--; while (pc->len) { switch (*pc->p) { case '\\': if (pc->len > 1) { if (*++pc->p == '\n') pc->linenr++; pc->len--; tt = JIM_TT_ESC; } break; case '"': pc->tend = pc->p - 1; pc->p++; pc->len--; return tt; case '[': JimParseSubCmd(pc); tt = JIM_TT_ESC; continue; case '\n': pc->linenr++; break; case '$': tt = JIM_TT_ESC; break; } pc->p++; pc->len--; } pc->missing.ch = '"'; pc->missing.line = line; pc->tend = pc->p - 1; return tt; } // Parses a [command] expression starting at pc->p. // Positions the parser at the end of the command expression, sets pc->tend and possibly pc->missing. static __device__ void JimParseSubCmd(struct JimParserCtx *pc) { int level = 1; int startofword = 1; int line = pc->tline; // Skip the bracket pc->p++; pc->len--; while (pc->len) { switch (*pc->p) { case '\\': if (pc->len > 1) { if (*++pc->p == '\n') pc->linenr++; pc->len--; } break; case '[': level++; break; case ']': if (--level == 0) { pc->tend = pc->p - 1; pc->p++; pc->len--; return; } break; case '"': if (startofword) { JimParseSubQuote(pc); continue; } break; case '{': JimParseSubBrace(pc); startofword = 0; continue; case '\n': pc->linenr++; break; } startofword = isspace(*pc->p); pc->p++; pc->len--; } pc->missing.ch = '['; pc->missing.line = line; pc->tend = pc->p - 1; } static __device__ int JimParseBrace(struct JimParserCtx *pc) { pc->tstart = pc->p + 1; pc->tline = pc->linenr; pc->tt = JIM_TT_STR; JimParseSubBrace(pc); return JIM_OK; } static __device__ int JimParseCmd(struct JimParserCtx *pc) { pc->tstart = pc->p + 1; pc->tline = pc->linenr; pc->tt = JIM_TT_CMD; JimParseSubCmd(pc); return JIM_OK; } static __device__ int JimParseQuote(struct JimParserCtx *pc) { pc->tstart = pc->p + 1; pc->tline = pc->linenr; pc->tt = JimParseSubQuote(pc); return JIM_OK; } static __device__ int JimParseVar(struct JimParserCtx *pc) { // skip the $ pc->p++; pc->len--; #ifdef EXPRSUGAR_BRACKET if (*pc->p == '[') { // Parse $[...] expr shorthand syntax
JimParseCmd(pc); pc->tt = JIM_TT_EXPRSUGAR; return JIM_OK; } #endif pc->tstart = pc->p; pc->tt = JIM_TT_VAR; pc->tline = pc->linenr; if (*pc->p == '{') { pc->tstart = ++pc->p; pc->len--; while (pc->len && *pc->p != '}') { if (*pc->p == '\n') pc->linenr++; pc->p++; pc->len--; } pc->tend = pc->p - 1; if (pc->len) { pc->p++; pc->len--; } } else { while (1) { // Skip double colon, but not single colon! if (pc->p[0] == ':' && pc->p[1] == ':') { while (*pc->p == ':') { pc->p++; pc->len--; } continue; } // Note that any char >= 0x80 must be part of a utf-8 char. We consider all unicode points outside of ASCII as letters if (isalnum(*pc->p) || *pc->p == '_' || (unsigned char)(*pc->p) >= 0x80) { pc->p++; pc->len--; continue; } break; } // Parse [dict get] syntax sugar. if (*pc->p == '(') { int count = 1; const char *paren = NULL; pc->tt = JIM_TT_DICTSUGAR; while (count && pc->len) { pc->p++; pc->len--; if (*pc->p == '\\' && pc->len >= 1) { pc->p++; pc->len--; } else if (*pc->p == '(') count++; else if (*pc->p == ')') { paren = pc->p; count--; } } if (count == 0) { pc->p++; pc->len--; } else if (paren) { // Did not find a matching paren. Back up paren++; pc->len += (int)(pc->p - paren); pc->p = paren; } #ifndef EXPRSUGAR_BRACKET if (*pc->tstart == '(') { pc->tt = JIM_TT_EXPRSUGAR; } #endif } pc->tend = pc->p - 1; } // Check if we parsed just the '$' character. That's not a variable so an error is returned to tell the state machine to consider this '$' just a string. if (pc->tstart == pc->p) { pc->p--; pc->len++; return JIM_ERROR; } return JIM_OK; } static __device__ int JimParseStr(struct JimParserCtx *pc) { if (pc->tt == JIM_TT_SEP || pc->tt == JIM_TT_EOL || pc->tt == JIM_TT_NONE || pc->tt == JIM_TT_STR) { // Starting a new word if (*pc->p == '{') return JimParseBrace(pc); if (*pc->p == '"') { pc->state = JIM_PS_QUOTE; pc->p++; pc->len--; // In case the end quote is missing pc->missing.line = pc->tline; } } pc->tstart = pc->p; pc->tline = pc->linenr; while (1) { if (pc->len == 0) { if (pc->state == JIM_PS_QUOTE) pc->missing.ch = '"'; pc->tend = pc->p - 1; pc->tt = JIM_TT_ESC; return JIM_OK; } switch (*pc->p) { case '\\': if (pc->state == JIM_PS_DEF && *(pc->p + 1) == '\n') { pc->tend = pc->p - 1; pc->tt = JIM_TT_ESC; return JIM_OK; } if (pc->len >= 2) { if (*(pc->p + 1) == '\n') pc->linenr++; pc->p++; pc->len--; } // End of script with trailing backslash else if (pc->len == 1) pc->missing.ch = '\\'; break; case '(': // If the following token is not '$' just keep going if (pc->len > 1 && pc->p[1] != '$') break; case ')': // Only need a separate ')' token if the previous was a var if (*pc->p == '(' || pc->tt == JIM_TT_VAR) { if (pc->p == pc->tstart) { // At the start of the token, so just return this char pc->p++; pc->len--; } pc->tend = pc->p - 1; pc->tt = JIM_TT_ESC; return JIM_OK; } break; case '$': case '[': pc->tend = pc->p - 1; pc->tt = JIM_TT_ESC; return JIM_OK; case ' ': case '\t': case '\n': case '\r': case '\f': case ';': if (pc->state == JIM_PS_DEF) { pc->tend = pc->p - 1; pc->tt = JIM_TT_ESC; return JIM_OK; } else if (*pc->p == '\n') pc->linenr++; break; case '"': if (pc->state == JIM_PS_QUOTE) { pc->tend = pc->p - 1; pc->tt = JIM_TT_ESC; pc->p++; pc->len--; pc->state = JIM_PS_DEF; return JIM_OK; } break; } pc->p++; pc->len--; } //return JIM_OK; // unreached } static __device__ int JimParseComment(struct JimParserCtx *pc) { while (*pc->p) { if (*pc->p == '\\') { pc->p++; pc->len--; if (pc->len == 0) { pc->missing.ch = '\\'; return JIM_OK; } if (*pc->p == '\n')
pc->linenr++; } else if (*pc->p == '\n') { pc->p++; pc->len--; pc->linenr++; break; } pc->p++; pc->len--; } return JIM_OK; } // xdigitval and odigitval are helper functions for JimEscape() static __device__ int xdigitval(int c) { if (c >= '0' && c <= '9') return c - '0'; if (c >= 'a' && c <= 'f') return c - 'a' + 10; if (c >= 'A' && c <= 'F') return c - 'A' + 10; return -1; } static __device__ int odigitval(int c) { if (c >= '0' && c <= '7') return c - '0'; return -1; } // Perform Tcl escape substitution of 's', storing the result string into 'dest'. The escaped string is guaranteed to // be the same length or shorter than the source string. Slen is the length of the string at 's', if it's -1 the string length will be calculated by the function. // The function returns the length of the resulting string. static __device__ int JimEscape(char *dest, const char *s, int slen) { char *p = dest; int i, len; if (slen == -1) slen = (int)strlen(s); for (i = 0; i < slen; i++) { switch (s[i]) { case '\\': switch (s[i + 1]) { case 'a': *p++ = 0x7; i++; break; case 'b': *p++ = 0x8; i++; break; case 'f': *p++ = 0xc; i++; break; case 'n': *p++ = 0xa; i++; break; case 'r': *p++ = 0xd; i++; break; case 't': *p++ = 0x9; i++; break; case 'u': case 'U': case 'x': // A unicode or hex sequence. // \x Expect 1-2 hex chars and convert to hex. // \u Expect 1-4 hex chars and convert to utf-8. // \U Expect 1-8 hex chars and convert to utf-8. // \u{NNN} Expect 1-6 hex chars and convert to utf-8. // An invalid sequence means simply the escaped char. { unsigned val = 0; int k; int maxchars = 2; i++; if (s[i] == 'U') maxchars = 8; else if (s[i] == 'u') { if (s[i + 1] == '{') { maxchars = 6; i++; } else maxchars = 4; } for (k = 0; k < maxchars; k++) { int c = xdigitval(s[i + k + 1]); if (c == -1) break; val = (val << 4) | c; } // The \u{nnn} syntax supports up to 21 bit codepoints. if (s[i] == '{') { if (k == 0 || val > 0x1fffff || s[i + k + 1] != '}') { // Back up i--; k = 0; } // Skip the closing brace else k++; } if (k) { // Got a valid sequence, so convert if (s[i] == 'x') *p++ = val; else p += utf8_fromunicode(p, val); i += k; break; } // Not a valid codepoint, just an escaped char *p++ = s[i]; } break; case 'v': *p++ = 0xb; i++; break; case '\0': *p++ = '\\'; i++; break; case '\n': // Replace all spaces and tabs after backslash newline with a single space *p++ = ' '; do { i++; } while (s[i + 1] == ' ' || s[i + 1] == '\t'); break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': // octal escape { int val = 0; int c = odigitval(s[i + 1]); val = c; c = odigitval(s[i + 2]); if (c == -1) { *p++ = val; i++; break; } val = (val * 8) + c; c = odigitval(s[i + 3]); if (c == -1) { *p++ = val; i += 2; break; } val = (val * 8) + c; *p++ = val; i += 3; } break; default: *p++ = s[i + 1]; i++; break; } break; default: *p++ = s[i]; break; } } len = (int)(p - dest); *p = '\0'; return len; } // Returns a dynamically allocated copy of the current token in the parser context. The function performs conversion of escapes if the token is of type JIM_TT_ESC. // Note that after the conversion, tokens that were grouped with braces in the source code can still be distinguished, by the token type, from an identical string obtained in a different way.
// For example the string: // {*}$a // will return as first token "*", of type JIM_TT_STR // While the string: // *$a // will return as first token "*", of type JIM_TT_ESC static __device__ Jim_Obj *JimParserGetTokenObj(Jim_Interp *interp, struct JimParserCtx *pc) { char *token; int len; const char *start = pc->tstart; const char *end = pc->tend; if (start > end) { len = 0; token = (char *)Jim_Alloc(1); token[0] = '\0'; } else { len = (int)(end - start) + 1; token = (char *)Jim_Alloc(len + 1); if (pc->tt != JIM_TT_ESC) { // No escape conversion needed? Just copy it. memcpy(token, start, len); token[len] = '\0'; } // Else convert the escape chars. else len = JimEscape(token, start, len); } return Jim_NewStringObjNoAlloc(interp, token, len); } // Parses the given string to determine if it represents a complete script. // This is useful for interactive shell implementations and for [info complete]. // If 'stateCharPtr' != NULL, the function stores ' ' on complete script, // '{' on scripts incomplete missing one or more '}' to be balanced. // '[' on scripts incomplete missing one or more ']' to be balanced. // '"' on scripts incomplete missing a '"' char. // '\\' on scripts with a trailing backslash. // If the script is complete, 1 is returned, otherwise 0. __device__ int Jim_ScriptIsComplete(const char *s, int len, char *stateCharPtr) { struct JimParserCtx parser; JimParserInit(&parser, s, len, 1); while (!parser.eof) JimParseScript(&parser); if (stateCharPtr) *stateCharPtr = parser.missing.ch; return (parser.missing.ch == ' '); } #pragma endregion // ----------------------------------------------------------------------------- // Tcl Lists parsing // ----------------------------------------------------------------------------- #pragma region Tcl Lists parsing static __device__ int JimParseListSep(struct JimParserCtx *pc); static __device__ int JimParseListStr(struct JimParserCtx *pc); static __device__ int JimParseListQuote(struct JimParserCtx *pc); static __device__ int JimParseList(struct JimParserCtx *pc) { if (isspace(*pc->p)) return JimParseListSep(pc); switch (*pc->p) { case '"': return JimParseListQuote(pc); case '{': return JimParseBrace(pc); default: if (pc->len) return JimParseListStr(pc); break; } pc->tstart = pc->tend = pc->p; pc->tline = pc->linenr; pc->tt = JIM_TT_EOL; pc->eof = 1; return JIM_OK; } static __device__ int JimParseListSep(struct JimParserCtx *pc) { pc->tstart = pc->p; pc->tline = pc->linenr; while (isspace(*pc->p)) { if (*pc->p == '\n') pc->linenr++; pc->p++; pc->len--; } pc->tend = pc->p - 1; pc->tt = JIM_TT_SEP; return JIM_OK; } static __device__ int JimParseListQuote(struct JimParserCtx *pc) { pc->p++; pc->len--; pc->tstart = pc->p; pc->tline = pc->linenr; pc->tt = JIM_TT_STR; while (pc->len) { switch (*pc->p) { case '\\': pc->tt = JIM_TT_ESC; if (--pc->len == 0) { // Trailing backslash pc->tend = pc->p; return JIM_OK; } pc->p++; break; case '\n': pc->linenr++; break; case '"': pc->tend = pc->p - 1; pc->p++; pc->len--; return JIM_OK; } pc->p++; pc->len--; } pc->tend = pc->p - 1; return JIM_OK; } static __device__ int JimParseListStr(struct JimParserCtx *pc) { pc->tstart = pc->p; pc->tline = pc->linenr; pc->tt = JIM_TT_STR; while (pc->len) { if (isspace(*pc->p)) { pc->tend = pc->p - 1; return JIM_OK; } if (*pc->p == '\\') { if (--pc->len == 0) { // Trailing backslash pc->tend = pc->p; return JIM_OK; } pc->tt = JIM_TT_ESC; pc->p++; } pc->p++; pc->len--; } pc->tend = pc->p - 1; return JIM_OK; } #pragma endregion
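// Compiled-out sketch: how a REPL might drive Jim_ScriptIsComplete() above to decide between evaluating and prompting for a continuation line. The helper name is hypothetical.
#if 0
static __device__ int JimShellNeedsMoreInput(const char *buf, int len) {
    char state;
    if (Jim_ScriptIsComplete(buf, len, &state)) return 0; // balanced: evaluate now
    // Otherwise 'state' holds the unbalanced opener, e.g. '{' for "puts {hello"
    return 1; // keep reading continuation lines
}
#endif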
// ----------------------------------------------------------------------------- // Jim_Obj related functions // ----------------------------------------------------------------------------- #pragma region Jim_Obj related functions // Return a new initialized object. __device__ Jim_Obj *Jim_NewObj(Jim_Interp *interp) { Jim_Obj *objPtr; // Check if there are objects in the free list if (interp->freeList != NULL) { // Unlink the object from the free list objPtr = interp->freeList; interp->freeList = objPtr->nextObjPtr; } // No ready to use objects: allocate a new one else objPtr = (Jim_Obj *)Jim_Alloc(sizeof(*objPtr)); // Object is returned with refCount of 0. Every kind of GC implemented should take care not to scan objects with refCount == 0. objPtr->refCount = 0; // All the other fields are left not initialized to save time. The caller will probably want to set them to the right value anyway. // Put the object into the live list objPtr->prevObjPtr = NULL; objPtr->nextObjPtr = interp->liveList; if (interp->liveList) interp->liveList->prevObjPtr = objPtr; interp->liveList = objPtr; return objPtr; } // Free an object. Actually objects are never freed, but just moved to the free objects list, where they will be reused by Jim_NewObj(). __device__ void Jim_FreeObj(Jim_Interp *interp, Jim_Obj *objPtr) { // Check if the object was already freed, panic. JimPanic(objPtr->refCount != 0, "!!!Object %p freed with bad refcount %d, type=%s", objPtr, objPtr->refCount, objPtr->typePtr ? objPtr->typePtr->name : "<none>"); // Free the internal representation Jim_FreeIntRep(interp, objPtr); // Free the string representation if (objPtr->bytes != NULL) if (objPtr->bytes != JimEmptyStringRep) Jim_Free(objPtr->bytes); // Unlink the object from the live objects list if (objPtr->prevObjPtr) objPtr->prevObjPtr->nextObjPtr = objPtr->nextObjPtr; if (objPtr->nextObjPtr) objPtr->nextObjPtr->prevObjPtr = objPtr->prevObjPtr; if (interp->liveList == objPtr) interp->liveList = objPtr->nextObjPtr; #ifdef JIM_DISABLE_OBJECT_POOL Jim_Free(objPtr); #else // Link the object into the free objects list objPtr->prevObjPtr = NULL; objPtr->nextObjPtr = interp->freeList; if (interp->freeList) interp->freeList->prevObjPtr = objPtr; interp->freeList = objPtr; objPtr->refCount = -1; #endif } // Invalidate the string representation of an object. __device__ void Jim_InvalidateStringRep(Jim_Obj *objPtr) { if (objPtr->bytes != NULL) if (objPtr->bytes != JimEmptyStringRep) Jim_Free(objPtr->bytes); objPtr->bytes = NULL; } // Duplicate an object. The returned object has refcount = 0. __device__ Jim_Obj *Jim_DuplicateObj(Jim_Interp *interp, Jim_Obj *objPtr) { Jim_Obj *dupPtr = Jim_NewObj(interp); if (objPtr->bytes == NULL) dupPtr->bytes = NULL; // Object does not have a valid string representation. else if (objPtr->length == 0) { // Zero length, so don't even bother with the type-specific dup, since all zero length objects look the same dupPtr->bytes = JimEmptyStringRep; dupPtr->length = 0; dupPtr->typePtr = NULL; return dupPtr; } else { dupPtr->bytes = (char *)Jim_Alloc(objPtr->length + 1); dupPtr->length = objPtr->length; memcpy(dupPtr->bytes, objPtr->bytes, objPtr->length + 1); // Copy the null byte too } // By default, the new object has the same type as the old object dupPtr->typePtr = objPtr->typePtr; if (objPtr->typePtr != NULL) { if (objPtr->typePtr->dupIntRepProc == NULL) dupPtr->internalRep = objPtr->internalRep; else objPtr->typePtr->dupIntRepProc(interp, objPtr, dupPtr); // The dup proc may set a different type, e.g. NULL
} return dupPtr; } // Return the string representation for objPtr. If the object's string representation is invalid, calls the updateStringProc method to create a new one from the internal representation of the object. __device__ const char *Jim_GetString(Jim_Obj *objPtr, int *lenPtr) { if (objPtr->bytes == NULL) { // Invalid string repr. Generate it. JimPanic(objPtr->typePtr->updateStringProc == NULL, "UpdateStringProc called against '%s' type.", objPtr->typePtr->name); objPtr->typePtr->updateStringProc(objPtr); } if (lenPtr) *lenPtr = objPtr->length; return objPtr->bytes; } // Just returns the length of the object's string rep __device__ int Jim_Length(Jim_Obj *objPtr) { if (objPtr->bytes == NULL) { // Invalid string repr. Generate it. JimPanic(objPtr->typePtr->updateStringProc == NULL, "UpdateStringProc called against '%s' type.", objPtr->typePtr->name); objPtr->typePtr->updateStringProc(objPtr); } return objPtr->length; } // Just returns object's string rep __device__ const char *Jim_String(Jim_Obj *objPtr) { if (!objPtr) return nullptr; if (objPtr->bytes == NULL) { // Invalid string repr. Generate it. JimPanic(objPtr->typePtr == NULL, "UpdateStringProc called against typeless value."); JimPanic(objPtr->typePtr->updateStringProc == NULL, "UpdateStringProc called against '%s' type.", objPtr->typePtr->name); objPtr->typePtr->updateStringProc(objPtr); } return objPtr->bytes; } static __device__ void JimSetStringBytes(Jim_Obj *objPtr, const char *str) { objPtr->bytes = Jim_StrDup(str); objPtr->length = (int)strlen(str); } static __device__ void FreeDictSubstInternalRep(Jim_Interp *interp, Jim_Obj *objPtr); static __device__ void DupDictSubstInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr); __constant__ static const Jim_ObjType _dictSubstObjType = { "dict-substitution", FreeDictSubstInternalRep, DupDictSubstInternalRep, NULL, JIM_TYPE_NONE, }; static __device__ void FreeInterpolatedInternalRep(Jim_Interp *interp, Jim_Obj *objPtr) { Jim_DecrRefCount(interp, objPtr->internalRep.dictSubstValue.indexObjPtr); } __constant__ static const Jim_ObjType _interpolatedObjType = { "interpolated", FreeInterpolatedInternalRep, NULL, NULL, JIM_TYPE_NONE, }; #pragma endregion // ----------------------------------------------------------------------------- // String Object // ----------------------------------------------------------------------------- #pragma region String Object static __device__ void DupStringInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr); static __device__ int SetStringFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr); __constant__ static const Jim_ObjType _stringObjType = { "string", NULL, DupStringInternalRep, NULL, JIM_TYPE_REFERENCES, }; static __device__ void DupStringInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr) { JIM_NOTUSED(interp); // This is a bit subtle: the only caller of this function should be Jim_DuplicateObj(), that will copy the string representation. After the copy, the duplicated // object will not have more room in the buffer than srcPtr->length bytes. So we just set it to length. dupPtr->internalRep.strValue.maxLength = srcPtr->length; dupPtr->internalRep.strValue.charLength = srcPtr->internalRep.strValue.charLength; } static __device__ int SetStringFromAny(Jim_Interp *interp, Jim_Obj *objPtr) { if (objPtr->typePtr != &_stringObjType) { // Get a fresh string representation. if (objPtr->bytes == NULL) { // Invalid string repr. Generate it.
JimPanic(objPtr->typePtr->updateStringProc == NULL, "UpdateStringProc called against '%s' type.", objPtr->typePtr->name); objPtr->typePtr->updateStringProc(objPtr); } // Free any other internal representation. Jim_FreeIntRep(interp, objPtr); // Set it as string, i.e. just set the maxLength field. objPtr->typePtr = &_stringObjType; objPtr->internalRep.strValue.maxLength = objPtr->length; // Don't know the utf-8 length yet objPtr->internalRep.strValue.charLength = -1; } return JIM_OK; } // Returns the length of the object string in chars, not bytes. // These may be different for a utf-8 string. __device__ int Jim_Utf8Length(Jim_Interp *interp, Jim_Obj *objPtr) { #ifdef JIM_UTF8 SetStringFromAny(interp, objPtr); if (objPtr->internalRep.strValue.charLength < 0) objPtr->internalRep.strValue.charLength = utf8_strlen(objPtr->bytes, objPtr->length); return objPtr->internalRep.strValue.charLength; #else return Jim_Length(objPtr); #endif } // len is in bytes -- see also Jim_NewStringObjUtf8() __device__ Jim_Obj *Jim_NewStringObj(Jim_Interp *interp, const char *s, int len) { Jim_Obj *objPtr = Jim_NewObj(interp); // Need to find out how many bytes the string requires if (len == -1) len = (int)strlen(s); // Alloc/Set the string rep. if (len == 0) objPtr->bytes = JimEmptyStringRep; else { objPtr->bytes = (char *)Jim_Alloc(len + 1); memcpy(objPtr->bytes, s, len); objPtr->bytes[len] = '\0'; } objPtr->length = len; // No typePtr field for the vanilla string object. objPtr->typePtr = NULL; return objPtr; } // charlen is in characters -- see also Jim_NewStringObj() __device__ Jim_Obj *Jim_NewStringObjUtf8(Jim_Interp *interp, const char *s, int charlen) { #ifdef JIM_UTF8 // Need to find out how many bytes the string requires int bytelen = utf8_index(s, charlen); Jim_Obj *objPtr = Jim_NewStringObj(interp, s, bytelen); // Remember the utf8 length, so set the type objPtr->typePtr = &_stringObjType; objPtr->internalRep.strValue.maxLength = bytelen; objPtr->internalRep.strValue.charLength = charlen; return objPtr; #else return Jim_NewStringObj(interp, s, charlen); #endif } // This version does not try to duplicate the 's' pointer, but uses it directly. __device__ Jim_Obj *Jim_NewStringObjNoAlloc(Jim_Interp *interp, char *s, int len) { Jim_Obj *objPtr = Jim_NewObj(interp); objPtr->bytes = s; objPtr->length = (len == -1 ? (int)strlen(s) : len); objPtr->typePtr = NULL; return objPtr; } // Low-level string append. Use it only against unshared objects of type "string". static __device__ void StringAppendString(Jim_Obj *objPtr, const char *str, int len) { if (len == -1) len = (int)strlen(str); int needlen = objPtr->length + len; if (objPtr->internalRep.strValue.maxLength < needlen || objPtr->internalRep.strValue.maxLength == 0) { needlen *= 2; // Inefficient to malloc() for less than 8 bytes if (needlen < 7) needlen = 7; if (objPtr->bytes == JimEmptyStringRep) objPtr->bytes = (char *)Jim_Alloc(needlen + 1); else objPtr->bytes = (char *)Jim_Realloc(objPtr->bytes, needlen + 1); objPtr->internalRep.strValue.maxLength = needlen; } memcpy(objPtr->bytes + objPtr->length, str, len); objPtr->bytes[objPtr->length + len] = '\0'; if (objPtr->internalRep.strValue.charLength >= 0) // Update the utf-8 char length objPtr->internalRep.strValue.charLength += utf8_strlen(objPtr->bytes + objPtr->length, len); objPtr->length += len; } // Higher level API to append strings to objects. The object must not be shared for any of these.
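// Compiled-out sketch of the append API documented above; the builder name is hypothetical. Note the target must be unshared (refCount 0 here) or Jim_AppendString panics.
#if 0
static __device__ Jim_Obj *JimBuildGreeting(Jim_Interp *interp) {
    Jim_Obj *objPtr = Jim_NewStringObj(interp, "hello", -1); // fresh object: refCount 0
    Jim_AppendString(interp, objPtr, ", ", -1);              // -1: length via strlen()
    Jim_AppendStrings(interp, objPtr, "wor", "ld", NULL);    // NULL ends the vararg list
    return objPtr; // "hello, world"
}
#endif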
__device__ void Jim_AppendString(Jim_Interp *interp, Jim_Obj *objPtr, const char *str, int len) { JimPanic(Jim_IsShared(objPtr), "Jim_AppendString called with shared object"); SetStringFromAny(interp, objPtr); StringAppendString(objPtr, str, len); } __device__ void Jim_AppendObj(Jim_Interp *interp, Jim_Obj *objPtr, Jim_Obj *appendObjPtr) { int len; const char *str = Jim_GetString(appendObjPtr, &len); Jim_AppendString(interp, objPtr, str, len); } __device__ void Jim_AppendStrings(Jim_Interp *interp, Jim_Obj *objPtr, ...) { va_list va; va_start(va, objPtr); JimPanic(Jim_IsShared(objPtr), "Jim_AppendString_ called with shared object"); SetStringFromAny(interp, objPtr); while (1) { const char *s = va_arg(va, const char *); if (s == NULL) break; Jim_AppendString(interp, objPtr, s, -1); } va_end(va); } __device__ int Jim_StringEqObj(Jim_Obj *aObjPtr, Jim_Obj *bObjPtr) { if (aObjPtr == bObjPtr) return 1; else { int Alen, Blen; const char *sA = Jim_GetString(aObjPtr, &Alen); const char *sB = Jim_GetString(bObjPtr, &Blen); return (Alen == Blen && !memcmp(sA, sB, Alen)); } } // Note. Does not support embedded nulls in either the pattern or the object. __device__ int Jim_StringMatchObj(Jim_Interp *interp, Jim_Obj *patternObjPtr, Jim_Obj *objPtr, int nocase) { return JimGlobMatch(Jim_String(patternObjPtr), Jim_String(objPtr), nocase); } // Note: does not support embedded nulls for the nocase option. __device__ int Jim_StringCompareObj(Jim_Interp *interp, Jim_Obj *firstObjPtr, Jim_Obj *secondObjPtr, int nocase) { int l1, l2; const char *s1 = Jim_GetString(firstObjPtr, &l1); const char *s2 = Jim_GetString(secondObjPtr, &l2); if (nocase) return JimStringCompareLen(s1, s2, -1, nocase); // Do a character compare for nocase return JimStringCompare(s1, l1, s2, l2); } // Like Jim_StringCompareObj() except compares to a maximum of the length of firstObjPtr. // Note: does not support embedded nulls __device__ int Jim_StringCompareLenObj(Jim_Interp *interp, Jim_Obj *firstObjPtr, Jim_Obj *secondObjPtr, int nocase) { const char *s1 = Jim_String(firstObjPtr); const char *s2 = Jim_String(secondObjPtr); return JimStringCompareLen(s1, s2, Jim_Utf8Length(interp, firstObjPtr), nocase); } // Convert a range, as returned by Jim_GetRange(), into an absolute index into an object of the specified length. // This function may return negative values, or values greater than or equal to the length of the list if the index // is out of range. static __device__ int JimRelToAbsIndex(int len, int idx) { return (idx < 0 ? len + idx : idx); } // Convert a pair of indexes (*firstPtr, *lastPtr) as normalized by JimRelToAbsIndex(), into a form suitable for implementation of commands like [string range] and [lrange]. // The resulting range is guaranteed to address valid elements of the structure. 
// static __device__ void JimRelToAbsRange(int len, int *firstPtr, int *lastPtr, int *rangeLenPtr) { int rangeLen; if (*firstPtr > *lastPtr) rangeLen = 0; else { rangeLen = *lastPtr - *firstPtr + 1; if (rangeLen) { if (*firstPtr < 0) { rangeLen += *firstPtr; *firstPtr = 0; } if (*lastPtr >= len) { rangeLen -= (*lastPtr - (len - 1)); *lastPtr = len - 1; } } } if (rangeLen < 0) rangeLen = 0; *rangeLenPtr = rangeLen; } static __device__ int JimStringGetRange(Jim_Interp *interp, Jim_Obj *firstObjPtr, Jim_Obj *lastObjPtr, int len, int *first, int *last, int *range) { if (Jim_GetIndex(interp, firstObjPtr, first) != JIM_OK) return JIM_ERROR; if (Jim_GetIndex(interp, lastObjPtr, last) != JIM_OK) return JIM_ERROR; *first = JimRelToAbsIndex(len, *first); *last = JimRelToAbsIndex(len, *last); JimRelToAbsRange(len, first, last, range); return JIM_OK; } __device__ Jim_Obj *Jim_StringByteRangeObj(Jim_Interp *interp, Jim_Obj *strObjPtr, Jim_Obj *firstObjPtr, Jim_Obj *lastObjPtr) { int first, last; int rangeLen; int bytelen; const char *str = Jim_GetString(strObjPtr, &bytelen); if (JimStringGetRange(interp, firstObjPtr, lastObjPtr, bytelen, &first, &last, &rangeLen) != JIM_OK) return NULL; if (first == 0 && rangeLen == bytelen) return strObjPtr; return Jim_NewStringObj(interp, str + first, rangeLen); } __device__ Jim_Obj *Jim_StringRangeObj(Jim_Interp *interp, Jim_Obj *strObjPtr, Jim_Obj *firstObjPtr, Jim_Obj *lastObjPtr) { #ifdef JIM_UTF8 int first, last; const char *str; int len, rangeLen; int bytelen; str = Jim_GetString(strObjPtr, &bytelen); len = Jim_Utf8Length(interp, strObjPtr); if (JimStringGetRange(interp, firstObjPtr, lastObjPtr, len, &first, &last, &rangeLen) != JIM_OK) return NULL; if (first == 0 && rangeLen == len) return strObjPtr; if (len == bytelen) return Jim_NewStringObj(interp, str + first, rangeLen); // ASCII optimisation return Jim_NewStringObjUtf8(interp, str + utf8_index(str, first), rangeLen); #else return Jim_StringByteRangeObj(interp, strObjPtr, firstObjPtr, lastObjPtr); #endif } __device__ Jim_Obj *JimStringReplaceObj(Jim_Interp *interp, Jim_Obj *strObjPtr, Jim_Obj *firstObjPtr, Jim_Obj *lastObjPtr, Jim_Obj *newStrObj) { int len = Jim_Utf8Length(interp, strObjPtr); int first, last, rangeLen; if (JimStringGetRange(interp, firstObjPtr, lastObjPtr, len, &first, &last, &rangeLen) != JIM_OK) return NULL; if (last < first) return strObjPtr; const char *str = Jim_String(strObjPtr); // Before part Jim_Obj *objPtr = Jim_NewStringObjUtf8(interp, str, first); // Replacement if (newStrObj) Jim_AppendObj(interp, objPtr, newStrObj); // After part Jim_AppendString(interp, objPtr, str + utf8_index(str, last + 1), len - last - 1); return objPtr; } // Note: does not support embedded nulls. static __device__ void JimStrCopyUpperLower(char *dest, const char *str, int uc) { while (*str) { int c; str += utf8_tounicode(str, &c); dest += utf8_getchars(dest, uc ? utf8_upper(c) : utf8_lower(c)); } *dest = 0; } // Note: does not support embedded nulls. static __device__ Jim_Obj *JimStringToLower(Jim_Interp *interp, Jim_Obj *strObjPtr) { SetStringFromAny(interp, strObjPtr); int len; const char *str = Jim_GetString(strObjPtr, &len); #ifdef JIM_UTF8 // Case mapping can change the utf-8 length of the string. But at worst it will be by one extra byte per char len *= 2; #endif char *buf = (char *)Jim_Alloc(len + 1); JimStrCopyUpperLower(buf, str, 0); return Jim_NewStringObjNoAlloc(interp, buf, -1); } // Note: does not support embedded nulls. 
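// Worked example (compiled out) of the index/range normalisation helpers above, using a hypothetical 5-element value.
#if 0
static __device__ void JimRangeExample() {
    int first = JimRelToAbsIndex(5, -2); // 3: negative indexes count from the end
    int last = JimRelToAbsIndex(5, 10);  // 10: may land past len - 1
    int rangeLen;
    JimRelToAbsRange(5, &first, &last, &rangeLen);
    // Now first == 3, last == 4, rangeLen == 2: clamped to valid elements only
    (void)rangeLen;
}
#endif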
static __device__ Jim_Obj *JimStringToUpper(Jim_Interp *interp, Jim_Obj *strObjPtr) { if (strObjPtr->typePtr != &_stringObjType) SetStringFromAny(interp, strObjPtr); int len; const char *str = Jim_GetString(strObjPtr, &len); #ifdef JIM_UTF8 // Case mapping can change the utf-8 length of the string. But at worst it will be by one extra byte per char len *= 2; #endif char *buf = (char *)Jim_Alloc(len + 1); JimStrCopyUpperLower(buf, str, 1); return Jim_NewStringObjNoAlloc(interp, buf, -1); } // Note: does not support embedded nulls. static __device__ Jim_Obj *JimStringToTitle(Jim_Interp *interp, Jim_Obj *strObjPtr) { int len; const char *str = Jim_GetString(strObjPtr, &len); if (len == 0) return strObjPtr; #ifdef JIM_UTF8 // Case mapping can change the utf-8 length of the string. But at worst it will be by one extra byte per char len *= 2; #endif char *buf, *p; buf = p = (char *)Jim_Alloc(len + 1); int c; str += utf8_tounicode(str, &c); p += utf8_getchars(p, utf8_title(c)); JimStrCopyUpperLower(p, str, 0); return Jim_NewStringObjNoAlloc(interp, buf, -1); } // Similar to memchr() except searches a UTF-8 string 'str' of byte length 'len' for unicode character 'c'. Returns the position if found or NULL if not static __device__ const char *utf8_memchr(const char *str, int len, int c) { #ifdef JIM_UTF8 while (len) { int sc; int n = utf8_tounicode(str, &sc); if (sc == c) return str; str += n; len -= n; } return NULL; #else return (const char *)memchr(str, c, len); #endif } // Searches for the first non-trim char in string (str, len) // If none is found, returns just past the last char. // Lengths are in bytes. static __device__ const char *JimFindTrimLeft(const char *str, int len, const char *trimchars, int trimlen) { while (len) { int c; int n = utf8_tounicode(str, &c); if (utf8_memchr(trimchars, trimlen, c) == NULL) // Not a trim char, so stop break; str += n; len -= n; } return str; } // Searches backwards for a non-trim char in string (str, len). // Returns a pointer to just after the non-trim char, or NULL if not found. // Lengths are in bytes. 
static __device__ const char *JimFindTrimRight(const char *str, int len, const char *trimchars, int trimlen) { str += len; while (len) { int c; int n = utf8_prev_len(str, len); len -= n; str -= n; n = utf8_tounicode(str, &c); if (utf8_memchr(trimchars, trimlen, c) == NULL) return str + n; } return NULL; } __constant__ static const char _default_trim_chars[] = " \t\n\r"; // sizeof() here includes the null byte __constant__ static int default_trim_chars_len = sizeof(_default_trim_chars); static __device__ Jim_Obj *JimStringTrimLeft(Jim_Interp *interp, Jim_Obj *strObjPtr, Jim_Obj *trimcharsObjPtr) { int len; const char *str = Jim_GetString(strObjPtr, &len); const char *trimchars = _default_trim_chars; int trimcharslen = default_trim_chars_len; const char *newstr; if (trimcharsObjPtr) trimchars = Jim_GetString(trimcharsObjPtr, &trimcharslen); newstr = JimFindTrimLeft(str, len, trimchars, trimcharslen); if (newstr == str) return strObjPtr; return Jim_NewStringObj(interp, newstr, len - (int)(newstr - str)); } static __device__ Jim_Obj *JimStringTrimRight(Jim_Interp *interp, Jim_Obj *strObjPtr, Jim_Obj *trimcharsObjPtr) { int len; const char *trimchars = _default_trim_chars; int trimcharslen = default_trim_chars_len; const char *nontrim; if (trimcharsObjPtr) trimchars = Jim_GetString(trimcharsObjPtr, &trimcharslen); SetStringFromAny(interp, strObjPtr); len = Jim_Length(strObjPtr); nontrim = JimFindTrimRight(strObjPtr->bytes, len, trimchars, trimcharslen); if (nontrim == NULL) return Jim_NewEmptyStringObj(interp); // All trim, so return a zero-length string if (nontrim == strObjPtr->bytes + len) return strObjPtr; // All non-trim, so return the original object if (Jim_IsShared(strObjPtr)) strObjPtr = Jim_NewStringObj(interp, strObjPtr->bytes, (int)(nontrim - strObjPtr->bytes)); else { // Can modify this string in place strObjPtr->bytes[nontrim - strObjPtr->bytes] = 0; strObjPtr->length = (int)(nontrim - strObjPtr->bytes); } return strObjPtr; } static __device__ Jim_Obj *JimStringTrim(Jim_Interp *interp, Jim_Obj *strObjPtr, Jim_Obj *trimcharsObjPtr) { // First trim left. 
Jim_Obj *objPtr = JimStringTrimLeft(interp, strObjPtr, trimcharsObjPtr); // Now trim right strObjPtr = JimStringTrimRight(interp, objPtr, trimcharsObjPtr); // Note: refCount check is needed since objPtr may be emptyObj if (objPtr != strObjPtr && objPtr->refCount == 0) Jim_FreeNewObj(interp, objPtr); // We don't want this object to be leaked return strObjPtr; } // Some platforms don't have isascii - need a non-macro version #ifdef HAVE_ISASCII #define jim_isascii isascii #else static __device__ int jim_isascii(int c) { return !(c & ~0x7f); } #endif __constant__ static const char *const _strclassnames[] = { "integer", "alpha", "alnum", "ascii", "digit", "double", "lower", "upper", "space", "xdigit", "control", "print", "graph", "punct", NULL }; enum { STR_IS_INTEGER, STR_IS_ALPHA, STR_IS_ALNUM, STR_IS_ASCII, STR_IS_DIGIT, STR_IS_DOUBLE, STR_IS_LOWER, STR_IS_UPPER, STR_IS_SPACE, STR_IS_XDIGIT, STR_IS_CONTROL, STR_IS_PRINT, STR_IS_GRAPH, STR_IS_PUNCT }; static __device__ int JimStringIs(Jim_Interp *interp, Jim_Obj *strObjPtr, Jim_Obj *strClass, int strict) { int strclass; int i; int (*isclassfunc)(int c) = NULL; if (Jim_GetEnum(interp, strClass, _strclassnames, &strclass, "class", JIM_ERRMSG | JIM_ENUM_ABBREV) != JIM_OK) return JIM_ERROR; int len; const char *str = Jim_GetString(strObjPtr, &len); if (len == 0) { Jim_SetResultBool(interp, !strict); return JIM_OK; } switch (strclass) { case STR_IS_INTEGER: { jim_wide w; Jim_SetResultBool(interp, JimGetWideNoErr(interp, strObjPtr, &w) == JIM_OK); return JIM_OK; } case STR_IS_DOUBLE: { double d; Jim_SetResultBool(interp, Jim_GetDouble(interp, strObjPtr, &d) == JIM_OK && errno != ERANGE); return JIM_OK; } // case STR_IS_ALPHA: for (i = 0; i < len; i++) if (!_isalpha(str[i])) { Jim_SetResultBool(interp, 0); return JIM_OK; } break; // case STR_IS_ALNUM: for (i = 0; i < len; i++) if (!isalnum(str[i])) { Jim_SetResultBool(interp, 0); return JIM_OK; } break; // case STR_IS_ASCII: for (i = 0; i < len; i++) if (!jim_isascii(str[i])) { Jim_SetResultBool(interp, 0); return JIM_OK; } break; // case STR_IS_DIGIT: for (i = 0; i < len; i++) if (!isdigit(str[i])) { Jim_SetResultBool(interp, 0); return JIM_OK; } break; // case STR_IS_LOWER: for (i = 0; i < len; i++) if (!_islower(str[i])) { Jim_SetResultBool(interp, 0); return JIM_OK; } break; // case STR_IS_UPPER: for (i = 0; i < len; i++) if (!_isupper(str[i])) { Jim_SetResultBool(interp, 0); return JIM_OK; } break; // case STR_IS_SPACE: for (i = 0; i < len; i++) if (!isspace(str[i])) { Jim_SetResultBool(interp, 0); return JIM_OK; } break; // case STR_IS_XDIGIT: for (i = 0; i < len; i++) if (!_isxdigit(str[i])) { Jim_SetResultBool(interp, 0); return JIM_OK; } break; // case STR_IS_CONTROL: for (i = 0; i < len; i++) if (!_iscntrl(str[i])) { Jim_SetResultBool(interp, 0); return JIM_OK; } break; // case STR_IS_PRINT: for (i = 0; i < len; i++) if (!_isprint(str[i])) { Jim_SetResultBool(interp, 0); return JIM_OK; } break; // //case STR_IS_GRAPH: for (i = 0; i < len; i++) if (!_isgraph(str[i])) { Jim_SetResultBool(interp, 0); return JIM_OK; } break; // //case STR_IS_PUNCT: for (i = 0; i < len; i++) if (!_ispunct(str[i])) { Jim_SetResultBool(interp, 0); return JIM_OK; } break; // default: // return JIM_ERROR; // } //#else case STR_IS_ALPHA: isclassfunc = isalpha; break; case STR_IS_ALNUM: isclassfunc = isalnum; break; case STR_IS_ASCII: isclassfunc = jim_isascii; break; case STR_IS_DIGIT: isclassfunc = isdigit; break; case STR_IS_LOWER: isclassfunc = islower; break; case STR_IS_UPPER: isclassfunc = isupper; break; 
case STR_IS_SPACE: isclassfunc = isspace; break;
case STR_IS_XDIGIT: isclassfunc = isxdigit; break;
case STR_IS_CONTROL: isclassfunc = iscntrl; break;
case STR_IS_PRINT: isclassfunc = isprint; break;
case STR_IS_GRAPH: isclassfunc = isgraph; break;
case STR_IS_PUNCT: isclassfunc = ispunct; break;
default: return JIM_ERROR;
}
for (i = 0; i < len; i++) {
    if (!isclassfunc(str[i])) {
        Jim_SetResultBool(interp, 0);
        return JIM_OK;
    }
}
Jim_SetResultBool(interp, 1);
return JIM_OK;
}

#pragma endregion

// -----------------------------------------------------------------------------
// Compared String Object
// -----------------------------------------------------------------------------
#pragma region Compared String Object

// This is a strange object that allows comparison of a C literal string with a Jim object in a very short time if the same comparison is done
// multiple times. For example every time the [if] command is executed, Jim has to check if a given argument is "else".
// If the code has no errors, this comparison is true most of the time, so we can cache the pointer of the string of the last matching
// comparison inside the object. Because most C compilers perform literal sharing (so that char *x = "foo"; char *y = "foo"; leads to x == y),
// this works pretty well even if comparisons are at different places inside the C code.
__constant__ static const Jim_ObjType _comparedStringObjType = {
    "compared-string",
    NULL,
    NULL,
    NULL,
    JIM_TYPE_REFERENCES,
};

// The only way this object is exposed to the API is via the following function. Returns true if the string and the object string repr.
// are the same, otherwise zero is returned.
// Note: this isn't binary safe, but it hardly needs to be.
__device__ int Jim_CompareStringImmediate(Jim_Interp *interp, Jim_Obj *objPtr, const char *str)
{
    if (objPtr->typePtr == &_comparedStringObjType && objPtr->internalRep.ptr == str)
        return 1;
    else {
        const char *objStr = Jim_String(objPtr);
        if (strcmp(str, objStr) != 0) return 0;
        if (objPtr->typePtr != &_comparedStringObjType) {
            Jim_FreeIntRep(interp, objPtr);
            objPtr->typePtr = &_comparedStringObjType;
        }
        objPtr->internalRep.ptr = (char *)str; // ATTENTION: const cast
        return 1;
    }
}

static __device__ int qsortCompareStringPointers(const void *a, const void *b)
{
    char *const *sa = (char *const *)a;
    char *const *sb = (char *const *)b;
    return strcmp(*sa, *sb);
}

#pragma endregion

// -----------------------------------------------------------------------------
// Source Object
//
// This object is just a string from the language point of view, but the internal representation contains the filename and line number
// where this token was read. This information is used by Jim_EvalObj() if the object passed happens to be of type "source".
//
// This allows propagation of the information about line numbers and file names and gives error messages with absolute line numbers.
//
// Note that this object uses the internal representation of the Jim_Object, so there is almost no memory overhead. (One Jim_Obj for each filename).
//
// Also the object will be converted to something else if the given token it represents in the source file is not something to be
// evaluated (not a script), and will be specialized in some other way, so the time overhead is also almost zero.
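/* Usage sketch (illustrative, not part of the original source): JimSetSourceInfo() below is
   how a freshly parsed token acquires its origin, e.g.:

       Jim_Obj *tok = Jim_NewStringObj(interp, "puts", -1); // untyped string object
       JimSetSourceInfo(interp, tok, fileNameObj, 42);      // now of type "source", line 42

   ScriptObjAddTokens() further below does exactly this for every script token, which is what
   lets later evaluation report errors with absolute line numbers. */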
// -----------------------------------------------------------------------------
#pragma region Source Object

static __device__ void FreeSourceInternalRep(Jim_Interp *interp, Jim_Obj *objPtr);
static __device__ void DupSourceInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr);

__constant__ static const Jim_ObjType _sourceObjType = {
    "source",
    FreeSourceInternalRep,
    DupSourceInternalRep,
    NULL,
    JIM_TYPE_REFERENCES,
};

__device__ void FreeSourceInternalRep(Jim_Interp *interp, Jim_Obj *objPtr)
{
    Jim_DecrRefCount(interp, objPtr->internalRep.sourceValue.fileNameObj);
}

__device__ void DupSourceInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr)
{
    dupPtr->internalRep.sourceValue = srcPtr->internalRep.sourceValue;
    Jim_IncrRefCount(dupPtr->internalRep.sourceValue.fileNameObj);
}

static __device__ void JimSetSourceInfo(Jim_Interp *interp, Jim_Obj *objPtr, Jim_Obj *fileNameObj, int lineNumber)
{
    JimPanic(Jim_IsShared(objPtr), "JimSetSourceInfo called with shared object");
    JimPanic(objPtr->typePtr != NULL, "JimSetSourceInfo called with typed object");
    Jim_IncrRefCount(fileNameObj);
    objPtr->internalRep.sourceValue.fileNameObj = fileNameObj;
    objPtr->internalRep.sourceValue.lineNumber = lineNumber;
    objPtr->typePtr = &_sourceObjType;
}

#pragma endregion

// -----------------------------------------------------------------------------
// ScriptLine Object
// This object is used only in the Script internal representation. For each line of the script, it holds the number of tokens on the line and the source line number.
#pragma region ScriptLine Object

__constant__ static const Jim_ObjType _scriptLineObjType = {
    "scriptline",
    NULL,
    NULL,
    NULL,
    JIM_NONE,
};

static __device__ Jim_Obj *JimNewScriptLineObj(Jim_Interp *interp, int argc, int line)
{
    Jim_Obj *objPtr;
#ifdef DEBUG_SHOW_SCRIPT
    char buf[100];
    snprintf(buf, sizeof(buf), "line=%d, argc=%d", line, argc);
    objPtr = Jim_NewStringObj(interp, buf, -1);
#else
    objPtr = Jim_NewEmptyStringObj(interp);
#endif
    objPtr->typePtr = &_scriptLineObjType;
    objPtr->internalRep.scriptLineValue.argc = argc;
    objPtr->internalRep.scriptLineValue.line = line;
    return objPtr;
}

#pragma endregion

// Script Object
//
// This object holds the parsed internal representation of a script. This representation is held within an allocated ScriptObj (see below)
#pragma region Script Object

static __device__ void FreeScriptInternalRep(Jim_Interp *interp, Jim_Obj *objPtr);
static __device__ void DupScriptInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr);
static __device__ void JimSetScriptFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr);
static __device__ int JimParseCheckMissing(Jim_Interp *interp, int ch);

__constant__ static const Jim_ObjType _scriptObjType = {
    "script",
    FreeScriptInternalRep,
    DupScriptInternalRep,
    NULL,
    JIM_TYPE_REFERENCES,
};

// Each token of a script is represented by a ScriptToken. The ScriptToken contains a type and a Jim_Obj. The Jim_Obj can be specialized by commands operating on it.
typedef struct ScriptToken {
    Jim_Obj *objPtr;
    int type;
} ScriptToken;

// This is the script object internal representation. An array of ScriptToken structures, including a pre-computed representation of the command length and arguments.
//
// For example the script:
//
// puts hello
// set $i $x$y [foo]BAR
//
// will produce a ScriptObj with the following ScriptToken's:
//
// LIN 2
// ESC puts
// ESC hello
// LIN 4
// ESC set
// VAR i
// WRD 2
// VAR x
// VAR y
// WRD 2
// CMD foo
// ESC BAR
//
// "puts hello" has two args (LIN 2), composed of single tokens. (Note that the WRD token is omitted for the common case of a single token.)
//
// "set $i $x$y [foo]BAR" has four (LIN 4) args, the first word has 1 token (ESC set), and the last has two tokens (WRD 2 CMD foo ESC BAR)
//
// The precomputation of the command structure makes Jim_Eval() faster, and simpler because there aren't dynamic lengths / allocations.
//
// -- {expand}/{*} handling --
//
// Expand is handled in a special way.
//
// If a "word" begins with {*}, the word token count is -ve.
//
// For example the command:
//
// list {*}{a b}
//
// Will produce the following cmdstruct array:
//
// LIN 2
// ESC list
// WRD -1
// STR a b
//
// Note that the 'LIN' token also contains the source information for the first word of the line for error reporting purposes
//
// -- the substFlags field of the structure --
//
// The scriptObj structure is used to represent both "script" objects and "subst" objects. In the second case, there are no LIN and WRD
// tokens. Instead SEP and EOL tokens are added as-is. In addition, the field 'substFlags' is used to represent the flags used to turn
// the string into the internal representation. If these flags do not match what the application requires,
// the scriptObj is created again. For example the script:
//
// subst -nocommands $string
// subst -novariables $string
//
// Will (re)create the internal representation of the $string object two times.
typedef struct ScriptObj {
    ScriptToken *token; // Tokens array
    Jim_Obj *fileNameObj; // Filename
    int len; // Length of token[]
    int substFlags; // flags used for the compilation of "subst" objects
    int inUse; // Used to share a ScriptObj. Currently only used by Jim_EvalObj() as protection against shimmering of the currently evaluated object.
    int firstline; // Line number of the first line
    int linenr; // Error line number, if any
    int missing; // Missing char if script failed to parse, (or space or backslash if OK)
} ScriptObj;

__device__ void FreeScriptInternalRep(Jim_Interp *interp, Jim_Obj *objPtr)
{
    struct ScriptObj *script = (struct ScriptObj *)objPtr->internalRep.ptr;
    if (--script->inUse != 0)
        return;
    for (int i = 0; i < script->len; i++)
        Jim_DecrRefCount(interp, script->token[i].objPtr);
    Jim_Free(script->token);
    Jim_DecrRefCount(interp, script->fileNameObj);
    Jim_Free(script);
}

__device__ void DupScriptInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr)
{
    JIM_NOTUSED(interp);
    JIM_NOTUSED(srcPtr);
    // Just return a simple string. We don't try to preserve the source info since in practice scripts are never duplicated
    dupPtr->typePtr = NULL;
}

// A simple parse token. As the script is parsed, the created tokens point into the script string rep.
typedef struct {
    const char *token; // Pointer to the start of the token
    int len; // Length of this token
    int type; // Token type
    int line; // Line number
} ParseToken;

// A list of parsed tokens representing a script. Tokens are added to this list as the script is parsed. It grows as needed.
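/* Usage sketch (illustrative, not part of the original source): the parse loop in
   JimSetScriptFromAny() further below drives the ParseTokenList defined next roughly as:

       ParseTokenList tokenlist;
       ScriptTokenListInit(&tokenlist);                   // starts on static_list, no allocation
       ScriptAddToken(&tokenlist, tok, len, type, line);  // reallocs transparently past 20 entries
       ...
       ScriptTokenListFree(&tokenlist);                   // frees only if it outgrew static_list
*/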
typedef struct {
    // Start with a statically allocated list of tokens which will be expanded with realloc if needed
    ParseToken *list; // Array of tokens
    int size; // Current size of the list
    int count; // Number of entries used
    ParseToken static_list[20]; // Small initial token space to avoid allocation
} ParseTokenList;

static __device__ void ScriptTokenListInit(ParseTokenList *tokenlist)
{
    tokenlist->list = tokenlist->static_list;
    tokenlist->size = sizeof(tokenlist->static_list) / sizeof(ParseToken);
    tokenlist->count = 0;
}

static __device__ void ScriptTokenListFree(ParseTokenList *tokenlist)
{
    if (tokenlist->list != tokenlist->static_list)
        Jim_Free(tokenlist->list);
}

// Adds the new token to the tokenlist. The token has the given length, type and line number. The token list is resized as necessary.
static __device__ void ScriptAddToken(ParseTokenList *tokenlist, const char *token, int len, int type, int line)
{
    if (tokenlist->count == tokenlist->size) {
        // Resize the list
        tokenlist->size *= 2;
        if (tokenlist->list != tokenlist->static_list)
            tokenlist->list = (ParseToken *)Jim_Realloc(tokenlist->list, tokenlist->size * sizeof(*tokenlist->list));
        else {
            // The list needs to become allocated
            tokenlist->list = (ParseToken *)Jim_Alloc(tokenlist->size * sizeof(*tokenlist->list));
            memcpy(tokenlist->list, tokenlist->static_list, tokenlist->count * sizeof(*tokenlist->list));
        }
    }
    ParseToken *t = &tokenlist->list[tokenlist->count++];
    t->token = token;
    t->len = len;
    t->type = type;
    t->line = line;
}

// Counts the number of adjoining non-separator tokens.
//
// Returns -ve if the first token is the expansion operator (in which case the count doesn't include that token).
static __device__ int JimCountWordTokens(ParseToken *t)
{
    int expand = 1;
    int count = 0;
    // Is the first word {*} or {expand}?
    if (t->type == JIM_TT_STR && !TOKEN_IS_SEP(t[1].type)) {
        if ((t->len == 1 && *t->token == '*') || (t->len == 6 && strncmp(t->token, "expand", 6) == 0)) {
            // Create an expand token
            expand = -1;
            t++;
        }
    }
    // Now count non-separator words
    while (!TOKEN_IS_SEP(t->type)) {
        t++;
        count++;
    }
    return count * expand;
}

// Create a script/subst object from the given token.
static __device__ Jim_Obj *JimMakeScriptObj(Jim_Interp *interp, const ParseToken *t)
{
    if (t->type == JIM_TT_ESC && memchr(t->token, '\\', t->len) != NULL) {
        // Convert backslash escapes. The result will never be longer than the original
        int len = t->len;
        char *str = (char *)Jim_Alloc(len + 1);
        len = JimEscape(str, t->token, len);
        return Jim_NewStringObjNoAlloc(interp, str, len);
    }
    // XXX: For strict Tcl compatibility, JIM_TT_STR should replace <backslash><newline><whitespace> with a single space.
    return Jim_NewStringObj(interp, t->token, t->len);
}

// Takes a tokenlist and creates the allocated list of script tokens in script->token, of length script->len.
// Unnecessary tokens are discarded, and LINE and WORD tokens are inserted as required.
// Also sets script->firstline to the line number of the first token
static __device__ void ScriptObjAddTokens(Jim_Interp *interp, struct ScriptObj *script, ParseTokenList *tokenlist)
{
    int i;
    int lineargs = 0; // Number of tokens so far for the current command
#ifdef DEBUG_SHOW_SCRIPT_TOKENS
    printf("==== Tokens ====\n");
    for (i = 0; i < tokenlist->count; i++)
#if __HIPCC__
        printf("[%2d]@%d %s '%s'\n", i, tokenlist->list[i].line, jim_tt_name(tokenlist->list[i].type), tokenlist->list[i].token);
#else
        printf("[%2d]@%d %s '%.*s'\n", i, tokenlist->list[i].line, jim_tt_name(tokenlist->list[i].type), tokenlist->list[i].len, tokenlist->list[i].token);
#endif
#endif
    // May need up to one extra script token for each EOL in the worst case
    int count = tokenlist->count;
    for (i = 0; i < tokenlist->count; i++)
        if (tokenlist->list[i].type == JIM_TT_EOL)
            count++;
    int linenr = script->firstline = tokenlist->list[0].line;
    struct ScriptToken *token = script->token = (ScriptToken *)Jim_Alloc(sizeof(ScriptToken) * count);
    // This is the first token for the current command
    ScriptToken *linefirst = token++;
    for (i = 0; i < tokenlist->count; ) {
        // Skip any leading separators
        while (tokenlist->list[i].type == JIM_TT_SEP)
            i++;
        // Look ahead to find out how many tokens make up the next word
        int wordtokens = JimCountWordTokens(tokenlist->list + i);
        if (wordtokens == 0) {
            // None, so at end of line
            if (lineargs) {
                linefirst->type = JIM_TT_LINE;
                linefirst->objPtr = JimNewScriptLineObj(interp, lineargs, linenr);
                Jim_IncrRefCount(linefirst->objPtr);
                // Reset for new line
                lineargs = 0;
                linefirst = token++;
            }
            i++;
            continue;
        }
        else if (wordtokens != 1) {
            // More than 1, or {*}, so insert a WORD token
            token->type = JIM_TT_WORD;
            token->objPtr = Jim_NewIntObj(interp, wordtokens);
            Jim_IncrRefCount(token->objPtr);
            token++;
            if (wordtokens < 0) {
                // Skip the expand token
                i++;
                wordtokens = -wordtokens - 1;
                lineargs--;
            }
        }
        // First real token on the line, so record the line number
        if (lineargs == 0)
            linenr = tokenlist->list[i].line;
        lineargs++;
        // Add each non-separator word token to the line
        while (wordtokens--) {
            const ParseToken *t = &tokenlist->list[i++];
            token->type = t->type;
            token->objPtr = JimMakeScriptObj(interp, t);
            Jim_IncrRefCount(token->objPtr);
            // Every object is initially a string of type 'source', but the internal type may be specialized during execution of the script.
            JimSetSourceInfo(interp, token->objPtr, script->fileNameObj, t->line);
            token++;
        }
    }
    if (lineargs == 0)
        token--;
    script->len = (int)(token - script->token);
    JimPanic(script->len >= count, "allocated script array is too short");
#ifdef DEBUG_SHOW_SCRIPT
    printf("==== Script (%s) ====\n", Jim_String(script->fileNameObj));
    for (i = 0; i < script->len; i++) {
        const ScriptToken *t = &script->token[i];
        printf("[%2d] %s %s\n", i, jim_tt_name(t->type), Jim_String(t->objPtr));
    }
#endif
}

// Sets an appropriate error message for a missing script/expression terminator.
// Returns JIM_ERROR if 'ch' represents an unmatched/missing character.
// Note that a trailing backslash is not considered to be an error.
static __device__ int JimParseCheckMissing(Jim_Interp *interp, int ch)
{
    const char *msg;
    switch (ch) {
    case '\\':
    case ' ':
        return JIM_OK;
    case '[':
        msg = "unmatched \"[\"";
        break;
    case '{':
        msg = "missing close-brace";
        break;
    case '"':
    default:
        msg = "missing quote";
        break;
    }
    Jim_SetResultString(interp, msg, -1);
    return JIM_ERROR;
}

// Similar to ScriptObjAddTokens(), but for subst objects.
static __device__ void SubstObjAddTokens(Jim_Interp *interp, struct ScriptObj *script, ParseTokenList *tokenlist)
{
    int i;
    struct ScriptToken *token = script->token = (ScriptToken *)Jim_Alloc(sizeof(ScriptToken) * tokenlist->count);
    for (i = 0; i < tokenlist->count; i++) {
        const ParseToken *t = &tokenlist->list[i];
        // Create a token for 't'
        token->type = t->type;
        token->objPtr = JimMakeScriptObj(interp, t);
        Jim_IncrRefCount(token->objPtr);
        token++;
    }
    script->len = i;
}

// This method takes the string representation of an object as a Tcl script, and generates the pre-parsed internal representation of the script.
// On parse error, the error details are recorded in the script object (note: the object is still converted to a script, even if an error occurs)
static __device__ void JimSetScriptFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr)
{
    // Try to get information about filename / line number
    int line = 1;
    if (objPtr->typePtr == &_sourceObjType)
        line = objPtr->internalRep.sourceValue.lineNumber;
    // Initially parse the script into tokens (in tokenlist)
    ParseTokenList tokenlist;
    ScriptTokenListInit(&tokenlist);
    int scriptTextLen;
    const char *scriptText = Jim_GetString(objPtr, &scriptTextLen);
    struct JimParserCtx parser;
    JimParserInit(&parser, scriptText, scriptTextLen, line);
    while (!parser.eof) {
        JimParseScript(&parser);
        ScriptAddToken(&tokenlist, parser.tstart, (int)(parser.tend - parser.tstart) + 1, parser.tt, parser.tline);
    }
    // Add a final EOF token
    ScriptAddToken(&tokenlist, scriptText + scriptTextLen, 0, JIM_TT_EOF, 0);
    // Create the "real" script tokens from the parsed tokens
    struct ScriptObj *script = (ScriptObj *)Jim_Alloc(sizeof(*script));
    memset(script, 0, sizeof(*script));
    script->inUse = 1;
    script->fileNameObj = (objPtr->typePtr == &_sourceObjType ? objPtr->internalRep.sourceValue.fileNameObj : interp->emptyObj);
    Jim_IncrRefCount(script->fileNameObj);
    script->missing = parser.missing.ch;
    script->linenr = parser.missing.line;
    ScriptObjAddTokens(interp, script, &tokenlist);
    // No longer need the token list
    ScriptTokenListFree(&tokenlist);
    // Free the old internal rep and set the new one.
    Jim_FreeIntRep(interp, objPtr);
    Jim_SetIntRepPtr(objPtr, script);
    objPtr->typePtr = &_scriptObjType;
}

static __device__ void JimAddErrorToStack(Jim_Interp *interp, ScriptObj *script);

// Returns the parsed script. Note that if there is any possibility that the script is not valid, call JimScriptValid() to check
__device__ ScriptObj *JimGetScript(Jim_Interp *interp, Jim_Obj *objPtr)
{
    if (objPtr == interp->emptyObj)
        objPtr = interp->nullScriptObj; // Avoid converting emptyObj to a script. Use nullScriptObj instead.
    if (objPtr->typePtr != &_scriptObjType || ((struct ScriptObj *)Jim_GetIntRepPtr(objPtr))->substFlags)
        JimSetScriptFromAny(interp, objPtr);
    return (ScriptObj *)Jim_GetIntRepPtr(objPtr);
}

// Returns 1 if the script is valid (parsed ok), otherwise returns 0 and leaves an error message in the interp result.
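/* Typical call pattern (sketch, assuming an eval-style caller): first obtain the parsed
   script, then verify it before walking the tokens:

       ScriptObj *script = JimGetScript(interp, scriptObjPtr);
       if (!JimScriptValid(interp, script))
           return JIM_ERROR; // the interp result already holds the parse error message
*/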
static __device__ int JimScriptValid(Jim_Interp *interp, ScriptObj *script)
{
    if (JimParseCheckMissing(interp, script->missing) == JIM_ERROR) {
        JimAddErrorToStack(interp, script);
        return 0;
    }
    return 1;
}

#pragma endregion

// -----------------------------------------------------------------------------
// Commands
// -----------------------------------------------------------------------------
#pragma region Commands

static __device__ void JimIncrCmdRefCount(Jim_Cmd *cmdPtr)
{
    cmdPtr->inUse++;
}

static __device__ void JimDecrCmdRefCount(Jim_Interp *interp, Jim_Cmd *cmdPtr)
{
    if (--cmdPtr->inUse == 0) {
        if (cmdPtr->isproc) {
            Jim_DecrRefCount(interp, cmdPtr->u.proc.argListObjPtr);
            Jim_DecrRefCount(interp, cmdPtr->u.proc.bodyObjPtr);
            Jim_DecrRefCount(interp, cmdPtr->u.proc.nsObj);
            if (cmdPtr->u.proc.staticVars) {
                Jim_FreeHashTable(cmdPtr->u.proc.staticVars);
                Jim_Free(cmdPtr->u.proc.staticVars);
            }
        }
        // native (C)
        else if (cmdPtr->u.native.delProc)
            cmdPtr->u.native.delProc(cmdPtr->u.native.privData, interp);
        // Delete any pushed command too
        if (cmdPtr->prevCmd)
            JimDecrCmdRefCount(interp, cmdPtr->prevCmd);
        Jim_Free(cmdPtr);
    }
}

// Variables HashTable Type.
// Keys are dynamically allocated strings, values are Jim_Var structures.
static __device__ void JimVariablesHTValDestructor(void *interp, void *val)
{
    Jim_DecrRefCount((Jim_Interp *)interp, ((Jim_Var *)val)->objPtr);
    Jim_Free(val);
}

__constant__ static const Jim_HashTableType JimVariablesHashTableType = {
    JimStringCopyHTHashFunction, // hash function
    JimStringCopyHTDup, // key dup
    NULL, // val dup
    JimStringCopyHTKeyCompare, // key compare
    JimStringCopyHTKeyDestructor, // key destructor
    JimVariablesHTValDestructor // val destructor
};

// Commands HashTable Type.
// Keys are dynamically allocated strings, values are Jim_Cmd structures.
static __device__ void JimCommandsHT_ValDestructor(void *interp, void *val)
{
    JimDecrCmdRefCount((Jim_Interp *)interp, (Jim_Cmd *)val);
}

__constant__ static const Jim_HashTableType JimCommandsHashTableType = {
    JimStringCopyHTHashFunction, // hash function
    JimStringCopyHTDup, // key dup
    NULL, // val dup
    JimStringCopyHTKeyCompare, // key compare
    JimStringCopyHTKeyDestructor, // key destructor
    JimCommandsHT_ValDestructor // val destructor
};

// ------------------------- Commands related functions ---------------------

#ifdef jim_ext_namespace
// Returns the "unscoped" version of the given namespace. That is, the fully qualified name without the leading ::
// The returned value is either nsObj, or an object with a zero ref count.
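/* Worked example (sketch): with interp->framePtr->nsObj holding "ns",
   JimQualifyNameObj() maps "::foo" -> "foo" (leading colons stripped) and "bar" -> "ns::bar";
   when the current namespace is the global (empty) one, "bar" is returned unchanged. */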
static __device__ Jim_Obj *JimQualifyNameObj(Jim_Interp *interp, Jim_Obj *nsObj) { const char *name = Jim_String(nsObj); if (name[0] == ':' && name[1] == ':') { while (*++name == ':') { } // This command is being defined in the global namespace nsObj = Jim_NewStringObj(interp, name, -1); } else if (Jim_Length(interp->framePtr->nsObj)) { // This command is being defined in a non-global namespace nsObj = Jim_DuplicateObj(interp, interp->framePtr->nsObj); Jim_AppendStrings(interp, nsObj, "::", name, NULL); } return nsObj; } __device__ Jim_Obj *Jim_MakeGlobalNamespaceName(Jim_Interp *interp, Jim_Obj *nameObjPtr) { const char *name = Jim_String(nameObjPtr); if (name[0] == ':' && name[1] == ':') return nameObjPtr; Jim_IncrRefCount(nameObjPtr); Jim_Obj *resultObj = Jim_NewStringObj(interp, "::", -1); Jim_AppendObj(interp, resultObj, nameObjPtr); Jim_DecrRefCount(interp, nameObjPtr); return resultObj; } // An efficient version of JimQualifyNameObj() where the name is available (and needed) as a 'const char *'. // Avoids creating an object if not necessary. The object stored in *objPtrPtr should be disposed of with JimFreeQualifiedName() after use. static __device__ const char *JimQualifyName(Jim_Interp *interp, const char *name, Jim_Obj **objPtrPtr) { Jim_Obj *objPtr = interp->emptyObj; if (name[0] == ':' && name[1] == ':') while (*++name == ':') { } // This command is being defined in the global namespace else if (Jim_Length(interp->framePtr->nsObj)) { // This command is being defined in a non-global namespace objPtr = Jim_DuplicateObj(interp, interp->framePtr->nsObj); Jim_AppendStrings(interp, objPtr, "::", name, NULL); name = Jim_String(objPtr); } Jim_IncrRefCount(objPtr); *objPtrPtr = objPtr; return name; } #define JimFreeQualifiedName(INTERP, OBJ) Jim_DecrRefCount((INTERP), (OBJ)) #else // We can be more efficient in the no-namespace case #define JimQualifyName(INTERP, NAME, DUMMY) ((NAME)[0] == ':' && (NAME)[1] == ':' ? (NAME) + 2 : (NAME)) #define JimFreeQualifiedName(INTERP, DUMMY) (void)(DUMMY) __device__ Jim_Obj *Jim_MakeGlobalNamespaceName(Jim_Interp *interp, Jim_Obj *nameObjPtr) { return nameObjPtr; } #endif static __device__ int JimCreateCommand(Jim_Interp *interp, const char *name, Jim_Cmd *cmd) { // It may already exist, so we try to delete the old one. Note that reference count means that it won't be deleted yet if it exists in the call stack. // BUT, if 'local' is in force, instead of deleting the existing proc, we stash a reference to the old proc here. Jim_HashEntry *he = Jim_FindHashEntry(&interp->commands, name); // There was an old cmd with the same name, so this requires a 'proc epoch' update. // If a procedure with the same name didn't exist there is no need to increment the 'proc epoch' because creation of a new procedure // can never affect existing cached commands. We don't do negative caching. 
if (he)
    Jim_InterpIncrProcEpoch(interp);
if (he && interp->local) {
    // Push this command over the top of the previous one
    cmd->prevCmd = (Jim_Cmd *)Jim_GetHashEntryVal(he);
    Jim_SetHashVal(&interp->commands, he, cmd);
}
else {
    // Replace the existing command
    if (he)
        Jim_DeleteHashEntry(&interp->commands, name);
    Jim_AddHashEntry(&interp->commands, name, cmd);
}
return JIM_OK;
}

__device__ int Jim_CreateCommand(Jim_Interp *interp, const char *cmdNameStr, Jim_CmdProc cmdProc, void *privData, Jim_DelCmdProc delProc)
{
    Jim_Cmd *cmdPtr = (Jim_Cmd *)Jim_Alloc(sizeof(*cmdPtr));
    // Store the new details for this command
    memset(cmdPtr, 0, sizeof(*cmdPtr));
    cmdPtr->inUse = 1;
    cmdPtr->u.native.delProc = delProc;
    cmdPtr->u.native.cmdProc = cmdProc;
    cmdPtr->u.native.privData = privData;
    JimCreateCommand(interp, cmdNameStr, cmdPtr);
    return JIM_OK;
}

static __device__ int JimCreateProcedureStatics(Jim_Interp *interp, Jim_Cmd *cmdPtr, Jim_Obj *staticsListObjPtr)
{
    int len = Jim_ListLength(interp, staticsListObjPtr);
    if (len == 0)
        return JIM_OK;
    cmdPtr->u.proc.staticVars = (Jim_HashTable *)Jim_Alloc(sizeof(Jim_HashTable));
    Jim_InitHashTable(cmdPtr->u.proc.staticVars, &JimVariablesHashTableType, interp);
    for (int i = 0; i < len; i++) {
        Jim_Obj *objPtr = Jim_ListGetIndex(interp, staticsListObjPtr, i);
        // Check if it's composed of two elements.
        int subLen = Jim_ListLength(interp, objPtr);
        if (subLen == 1 || subLen == 2) {
            // Try to get the variable value from the current environment.
            Jim_Obj *initObjPtr;
            Jim_Obj *nameObjPtr = Jim_ListGetIndex(interp, objPtr, 0);
            if (subLen == 1) {
                initObjPtr = Jim_GetVariable(interp, nameObjPtr, JIM_NONE);
                if (initObjPtr == NULL) {
                    Jim_SetResultFormatted(interp, "variable for initialization of static \"%#s\" not found in the local context", nameObjPtr);
                    return JIM_ERROR;
                }
            }
            else initObjPtr = Jim_ListGetIndex(interp, objPtr, 1);
            if (JimValidName(interp, "static variable", nameObjPtr) != JIM_OK)
                return JIM_ERROR;
            Jim_Var *varPtr = (Jim_Var *)Jim_Alloc(sizeof(*varPtr));
            varPtr->objPtr = initObjPtr;
            Jim_IncrRefCount(initObjPtr);
            varPtr->linkFramePtr = NULL;
            if (Jim_AddHashEntry(cmdPtr->u.proc.staticVars, Jim_String(nameObjPtr), varPtr) != JIM_OK) {
                Jim_SetResultFormatted(interp, "static variable name \"%#s\" duplicated in statics list", nameObjPtr);
                Jim_DecrRefCount(interp, initObjPtr);
                Jim_Free(varPtr);
                return JIM_ERROR;
            }
        }
        else {
            Jim_SetResultFormatted(interp, "too many fields in static specifier \"%#s\"", objPtr);
            return JIM_ERROR;
        }
    }
    return JIM_OK;
}

static __device__ void JimUpdateProcNamespace(Jim_Interp *interp, Jim_Cmd *cmdPtr, const char *cmdname)
{
#ifdef jim_ext_namespace
    if (cmdPtr->isproc) {
        const char *pt = strrchr((char *)cmdname, ':'); // XXX: Really need JimNamespaceSplit()
        if (pt && pt != cmdname && pt[-1] == ':') {
            Jim_DecrRefCount(interp, cmdPtr->u.proc.nsObj);
            cmdPtr->u.proc.nsObj = Jim_NewStringObj(interp, cmdname, (int)(pt - cmdname - 1));
            Jim_IncrRefCount(cmdPtr->u.proc.nsObj);
            // This command shadows a global command, so a proc epoch update is required
            if (Jim_FindHashEntry(&interp->commands, pt + 1))
                Jim_InterpIncrProcEpoch(interp);
        }
    }
#endif
}

static __device__ Jim_Cmd *JimCreateProcedureCmd(Jim_Interp *interp, Jim_Obj *argListObjPtr, Jim_Obj *staticsListObjPtr, Jim_Obj *bodyObjPtr, Jim_Obj *nsObj)
{
    int argListLen = Jim_ListLength(interp, argListObjPtr);
    // Allocate space for both the command pointer and the arg list
    Jim_Cmd *cmdPtr = (Jim_Cmd *)Jim_Alloc(sizeof(*cmdPtr) + sizeof(struct Jim_Cmd::a_::c_::Jim_ProcArg) * argListLen);
    memset(cmdPtr, 0, sizeof(*cmdPtr));
    cmdPtr->inUse = 1;
    cmdPtr->isproc = 1;
    cmdPtr->u.proc.argListObjPtr = argListObjPtr;
    cmdPtr->u.proc.argListLen = argListLen;
    cmdPtr->u.proc.bodyObjPtr = bodyObjPtr;
    cmdPtr->u.proc.argsPos = -1;
    cmdPtr->u.proc.arglist = (struct Jim_Cmd::a_::c_::Jim_ProcArg *)(cmdPtr + 1);
    cmdPtr->u.proc.nsObj = nsObj ? nsObj : interp->emptyObj;
    Jim_IncrRefCount(argListObjPtr);
    Jim_IncrRefCount(bodyObjPtr);
    Jim_IncrRefCount(cmdPtr->u.proc.nsObj);
    // Create the statics hash table.
    if (staticsListObjPtr && JimCreateProcedureStatics(interp, cmdPtr, staticsListObjPtr) != JIM_OK)
        goto err;
    // Parse the args out into arglist, validating as we go.
    // Examine the argument list for default parameters and 'args'.
    for (int i = 0; i < argListLen; i++) {
        // Examine a parameter
        Jim_Obj *argPtr = Jim_ListGetIndex(interp, argListObjPtr, i);
        int len = Jim_ListLength(interp, argPtr);
        if (len == 0) {
            Jim_SetResultString(interp, "argument with no name", -1);
err:
            JimDecrCmdRefCount(interp, cmdPtr);
            return NULL;
        }
        if (len > 2) {
            Jim_SetResultFormatted(interp, "too many fields in argument specifier \"%#s\"", argPtr);
            goto err;
        }
        Jim_Obj *nameObjPtr;
        Jim_Obj *defaultObjPtr;
        if (len == 2) {
            // Optional parameter
            nameObjPtr = Jim_ListGetIndex(interp, argPtr, 0);
            defaultObjPtr = Jim_ListGetIndex(interp, argPtr, 1);
        }
        else {
            // Required parameter
            nameObjPtr = argPtr;
            defaultObjPtr = NULL;
        }
        if (Jim_CompareStringImmediate(interp, nameObjPtr, "args")) {
            if (cmdPtr->u.proc.argsPos >= 0) {
                Jim_SetResultString(interp, "'args' specified more than once", -1);
                goto err;
            }
            cmdPtr->u.proc.argsPos = i;
        }
        else {
            if (len == 2)
                cmdPtr->u.proc.optArity++;
            else
                cmdPtr->u.proc.reqArity++;
        }
        cmdPtr->u.proc.arglist[i].nameObjPtr = nameObjPtr;
        cmdPtr->u.proc.arglist[i].defaultObjPtr = defaultObjPtr;
    }
    return cmdPtr;
}

__device__ int Jim_DeleteCommand(Jim_Interp *interp, const char *name)
{
    int ret = JIM_OK;
    Jim_Obj *qualifiedNameObj;
    const char *qualname = JimQualifyName(interp, name, &qualifiedNameObj);
    if (Jim_DeleteHashEntry(&interp->commands, qualname) == JIM_ERROR) {
        Jim_SetResultFormatted(interp, "can't delete \"%s\": command doesn't exist", name);
        ret = JIM_ERROR;
    }
    else Jim_InterpIncrProcEpoch(interp);
    JimFreeQualifiedName(interp, qualifiedNameObj);
    return ret;
}

__device__ int Jim_RenameCommand(Jim_Interp *interp, const char *oldName, const char *newName)
{
    int ret = JIM_ERROR;
    if (newName[0] == 0)
        return Jim_DeleteCommand(interp, oldName);
    Jim_Obj *qualifiedOldNameObj;
    Jim_Obj *qualifiedNewNameObj;
    const char *fqold = JimQualifyName(interp, oldName, &qualifiedOldNameObj);
    const char *fqnew = JimQualifyName(interp, newName, &qualifiedNewNameObj);
    // Does it exist?
    Jim_HashEntry *he = Jim_FindHashEntry(&interp->commands, fqold);
    if (he == NULL)
        Jim_SetResultFormatted(interp, "can't rename \"%s\": command doesn't exist", oldName);
    else if (Jim_FindHashEntry(&interp->commands, fqnew))
        Jim_SetResultFormatted(interp, "can't rename to \"%s\": command already exists", newName);
    else {
        // Add the new name first
        Jim_Cmd *cmdPtr = (Jim_Cmd *)Jim_GetHashEntryVal(he);
        JimIncrCmdRefCount(cmdPtr);
        JimUpdateProcNamespace(interp, cmdPtr, fqnew);
        Jim_AddHashEntry(&interp->commands, fqnew, cmdPtr);
        // Now remove the old name
        Jim_DeleteHashEntry(&interp->commands, fqold);
        // Increment the epoch
        Jim_InterpIncrProcEpoch(interp);
        ret = JIM_OK;
    }
    JimFreeQualifiedName(interp, qualifiedOldNameObj);
    JimFreeQualifiedName(interp, qualifiedNewNameObj);
    return ret;
}

#pragma endregion

// -----------------------------------------------------------------------------
// Command object
// -----------------------------------------------------------------------------
#pragma region Command object

static __device__ void FreeCommandInternalRep(Jim_Interp *interp, Jim_Obj *objPtr)
{
    Jim_DecrRefCount(interp, objPtr->internalRep.cmdValue.nsObj);
}

static __device__ void DupCommandInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr)
{
    dupPtr->internalRep.cmdValue = srcPtr->internalRep.cmdValue;
    dupPtr->typePtr = srcPtr->typePtr;
    Jim_IncrRefCount(dupPtr->internalRep.cmdValue.nsObj);
}

__constant__ static const Jim_ObjType _commandObjType = {
    "command",
    FreeCommandInternalRep,
    DupCommandInternalRep,
    NULL,
    JIM_TYPE_REFERENCES,
};

// This function returns the command structure for the command name stored in objPtr. It tries to specialize the objPtr to contain
// cached info instead of performing the lookup in the hash table every time. The cached information may be out of date, in which
// case the lookup is performed and the cache updated.
// Respects the 'upcall' setting
__device__ Jim_Cmd *Jim_GetCommand(Jim_Interp *interp, Jim_Obj *objPtr, int flags)
{
    Jim_Cmd *cmd;
    // In order to be valid, the proc epoch must match and the lookup must have occurred in the same namespace
    if (objPtr->typePtr != &_commandObjType || objPtr->internalRep.cmdValue.procEpoch != interp->procEpoch
#ifdef jim_ext_namespace
        || !Jim_StringEqObj(objPtr->internalRep.cmdValue.nsObj, interp->framePtr->nsObj)
#endif
        ) {
        // Not cached or out of date, so lookup
        // Do we need to try the local namespace?
        const char *name = Jim_String(objPtr);
        Jim_HashEntry *he;
        if (name[0] == ':' && name[1] == ':')
            while (*++name == ':') { }
#ifdef jim_ext_namespace
        else if (Jim_Length(interp->framePtr->nsObj)) {
            // This command is being resolved in a non-global namespace
            Jim_Obj *nameObj = Jim_DuplicateObj(interp, interp->framePtr->nsObj);
            Jim_AppendStrings(interp, nameObj, "::", name, NULL);
            he = Jim_FindHashEntry(&interp->commands, Jim_String(nameObj));
            Jim_FreeNewObj(interp, nameObj);
            if (he)
                goto found;
        }
#endif
        // Lookup in the global namespace
        he = Jim_FindHashEntry(&interp->commands, name);
        if (he == NULL) {
            if (flags & JIM_ERRMSG)
                Jim_SetResultFormatted(interp, "invalid command name \"%#s\"", objPtr);
            return NULL;
        }
#ifdef jim_ext_namespace
found:
#endif
        cmd = (Jim_Cmd *)Jim_GetHashEntryVal(he);
        // Free the old internal repr and set the new one.
        Jim_FreeIntRep(interp, objPtr);
        objPtr->typePtr = &_commandObjType;
        objPtr->internalRep.cmdValue.procEpoch = interp->procEpoch;
        objPtr->internalRep.cmdValue.cmdPtr = cmd;
        objPtr->internalRep.cmdValue.nsObj = interp->framePtr->nsObj;
        Jim_IncrRefCount(interp->framePtr->nsObj);
    }
    else cmd = objPtr->internalRep.cmdValue.cmdPtr;
    while (cmd->u.proc.upcall)
        cmd = cmd->prevCmd;
    return cmd;
}

#pragma endregion

// -----------------------------------------------------------------------------
// Variables
// -----------------------------------------------------------------------------
// DEG: pragma for more?
// -----------------------------------------------------------------------------
// Variable object
// -----------------------------------------------------------------------------
#pragma region Variable object

#define JIM_DICT_SUGAR 100 // Only returned by SetVariableFromAny()

static __device__ int SetVariableFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr);

static __device__ const Jim_ObjType _variableObjType = {
    "variable",
    NULL,
    NULL,
    NULL,
    JIM_TYPE_REFERENCES,
};

// Check that the name does not contain embedded nulls. Variable and procedure names are manipulated as null terminated strings, so don't allow names with embedded nulls.
static __device__ int JimValidName(Jim_Interp *interp, const char *type, Jim_Obj *nameObjPtr)
{
    // Variable names and proc names can't contain embedded nulls
    if (nameObjPtr->typePtr != &_variableObjType) {
        int len;
        const char *str = Jim_GetString(nameObjPtr, &len);
        if (memchr(str, '\0', len)) {
            Jim_SetResultFormatted(interp, "%s name contains embedded null", type);
            return JIM_ERROR;
        }
    }
    return JIM_OK;
}

// This method should be called only by the variable API. It returns JIM_OK on success (variable already exists),
// JIM_ERROR if it does not exist, JIM_DICT_SUGAR if it's not a variable name, but syntax glue for [dict] i.e. the last character is ')'
static __device__ int SetVariableFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr)
{
    // Check if the object is already an up-to-date variable
    Jim_CallFrame *framePtr;
    if (objPtr->typePtr == &_variableObjType) {
        framePtr = (objPtr->internalRep.varValue.global ? interp->topFramePtr : interp->framePtr);
        if (objPtr->internalRep.varValue.callFrameId == framePtr->id)
            return JIM_OK; // nothing to do
        // Need to re-resolve the variable in the updated callframe
    }
    else if (objPtr->typePtr == &_dictSubstObjType)
        return JIM_DICT_SUGAR;
    else if (JimValidName(interp, "variable", objPtr) != JIM_OK)
        return JIM_ERROR;
    int len;
    const char *varName = Jim_GetString(objPtr, &len);
    // Make sure it's not syntax glue to get/set dict.
    if (len && varName[len - 1] == ')' && strchr(varName, '(') != NULL)
        return JIM_DICT_SUGAR;
    int global;
    if (varName[0] == ':' && varName[1] == ':') {
        while (*++varName == ':') { }
        global = 1;
        framePtr = interp->topFramePtr;
    }
    else {
        global = 0;
        framePtr = interp->framePtr;
    }
    // Resolve this name in the variables hash table
    Jim_HashEntry *he = Jim_FindHashEntry(&framePtr->vars, varName);
    if (he == NULL) {
        if (!global && framePtr->staticVars)
            he = Jim_FindHashEntry(framePtr->staticVars, varName); // Try with static vars.
        if (he == NULL)
            return JIM_ERROR;
    }
    // Free the old internal repr and set the new one.
Jim_FreeIntRep(interp, objPtr); objPtr->typePtr = &_variableObjType; objPtr->internalRep.varValue.callFrameId = framePtr->id; objPtr->internalRep.varValue.varPtr = (Jim_Var *)Jim_GetHashEntryVal(he); objPtr->internalRep.varValue.global = global; return JIM_OK; } // -------------------- Variables related functions ------------------------- static __device__ int JimDictSugarSet(Jim_Interp *interp, Jim_Obj *ObjPtr, Jim_Obj *valObjPtr); static __device__ Jim_Obj *JimDictSugarGet(Jim_Interp *interp, Jim_Obj *ObjPtr, int flags); static __device__ Jim_Var *JimCreateVariable(Jim_Interp *interp, Jim_Obj *nameObjPtr, Jim_Obj *valObjPtr) { // New variable to create Jim_Var *var = (Jim_Var *)Jim_Alloc(sizeof(*var)); var->objPtr = valObjPtr; Jim_IncrRefCount(valObjPtr); var->linkFramePtr = NULL; Jim_CallFrame *framePtr; int global; const char *name = Jim_String(nameObjPtr); if (name[0] == ':' && name[1] == ':') { while (*++name == ':') { } framePtr = interp->topFramePtr; global = 1; } else { framePtr = interp->framePtr; global = 0; } // Insert the new variable Jim_AddHashEntry(&framePtr->vars, name, var); // Make the object int rep a variable Jim_FreeIntRep(interp, nameObjPtr); nameObjPtr->typePtr = &_variableObjType; nameObjPtr->internalRep.varValue.callFrameId = framePtr->id; nameObjPtr->internalRep.varValue.varPtr = var; nameObjPtr->internalRep.varValue.global = global; return var; } // For now that's dummy. Variables lookup should be optimized in many ways, with caching of lookups, and possibly with a table of pre-allocated vars in every CallFrame for local vars. // All the caching should also have an 'epoch' mechanism similar to the one used by Tcl for procedures lookup caching. __device__ int Jim_SetVariable(Jim_Interp *interp, Jim_Obj *nameObjPtr, Jim_Obj *valObjPtr, int flags) { Jim_CallFrame *savedFramePtr; int global = (flags & JIMGLOBAL_); if (global) { savedFramePtr = interp->framePtr; interp->framePtr = interp->topFramePtr; } flags &= ~JIMGLOBAL_; switch (SetVariableFromAny(interp, nameObjPtr)) { case JIM_DICT_SUGAR: if (global) interp->framePtr = savedFramePtr; return JimDictSugarSet(interp, nameObjPtr, valObjPtr); case JIM_ERROR: if (JimValidName(interp, "variable", nameObjPtr) != JIM_OK) { if (global) interp->framePtr = savedFramePtr; return JIM_ERROR; } JimCreateVariable(interp, nameObjPtr, valObjPtr); break; case JIM_OK: Jim_Var *var = nameObjPtr->internalRep.varValue.varPtr; if (var->linkFramePtr == NULL) { Jim_IncrRefCount(valObjPtr); Jim_DecrRefCount(interp, var->objPtr); var->objPtr = valObjPtr; } // else handle the link else { Jim_CallFrame *savedCallFrame = interp->framePtr; interp->framePtr = var->linkFramePtr; int err = Jim_SetVariable(interp, var->objPtr, valObjPtr, 0); interp->framePtr = savedCallFrame; if (err != JIM_OK) { if (global) interp->framePtr = savedFramePtr; return err; } } } if (global) interp->framePtr = savedFramePtr; return JIM_OK; } __device__ int Jim_SetVariableStr(Jim_Interp *interp, const char *name, Jim_Obj *objPtr, int flags) { Jim_Obj *nameObjPtr = Jim_NewStringObj(interp, name, -1); Jim_IncrRefCount(nameObjPtr); int result = Jim_SetVariable(interp, nameObjPtr, objPtr, flags); Jim_DecrRefCount(interp, nameObjPtr); return result; } __device__ int Jim_SetVariableStrWithStr(Jim_Interp *interp, const char *name, const char *val, int flags) { Jim_Obj *nameObjPtr = Jim_NewStringObj(interp, name, -1); Jim_Obj *valObjPtr = Jim_NewStringObj(interp, val, -1); Jim_IncrRefCount(nameObjPtr); Jim_IncrRefCount(valObjPtr); int result = Jim_SetVariable(interp, 
        nameObjPtr, valObjPtr, flags);
    Jim_DecrRefCount(interp, nameObjPtr);
    Jim_DecrRefCount(interp, valObjPtr);
    return result;
}

__device__ int Jim_SetVariableLink(Jim_Interp *interp, Jim_Obj *nameObjPtr, Jim_Obj *targetNameObjPtr, Jim_CallFrame *targetCallFrame)
{
    // Check for an existing variable or link
    Jim_Var *varPtr;
    switch (SetVariableFromAny(interp, nameObjPtr)) {
    case JIM_DICT_SUGAR:
        // XXX: This message seems unnecessarily verbose, but it matches Tcl
        Jim_SetResultFormatted(interp, "bad variable name \"%#s\": upvar won't create a scalar variable that looks like an array element", nameObjPtr);
        return JIM_ERROR;
    case JIM_OK:
        varPtr = nameObjPtr->internalRep.varValue.varPtr;
        if (varPtr->linkFramePtr == NULL) {
            Jim_SetResultFormatted(interp, "variable \"%#s\" already exists", nameObjPtr);
            return JIM_ERROR;
        }
        // It exists, but is a link, so first delete the link
        varPtr->linkFramePtr = NULL;
        break;
    }
    // Resolve the call frames for both variables. XXX: SetVariableFromAny() already did this!
    const char *varName = Jim_String(nameObjPtr);
    Jim_CallFrame *framePtr;
    if (varName[0] == ':' && varName[1] == ':') {
        while (*++varName == ':') { }
        // Linking a global var does nothing
        framePtr = interp->topFramePtr;
    }
    else framePtr = interp->framePtr;
    const char *targetName = Jim_String(targetNameObjPtr);
    if (targetName[0] == ':' && targetName[1] == ':') {
        while (*++targetName == ':') { }
        targetNameObjPtr = Jim_NewStringObj(interp, targetName, -1);
        targetCallFrame = interp->topFramePtr;
    }
    Jim_IncrRefCount(targetNameObjPtr);
    if (framePtr->level < targetCallFrame->level) {
        Jim_SetResultFormatted(interp, "bad variable name \"%#s\": upvar won't create namespace variable that refers to procedure variable", nameObjPtr);
        Jim_DecrRefCount(interp, targetNameObjPtr);
        return JIM_ERROR;
    }
    // Check for cycles
    if (framePtr == targetCallFrame) {
        Jim_Obj *objPtr = targetNameObjPtr;
        // Cycles are only possible with 'uplevel 0'
        while (1) {
            if (strcmp(Jim_String(objPtr), varName) == 0) {
                Jim_SetResultString(interp, "can't upvar from variable to itself", -1);
                Jim_DecrRefCount(interp, targetNameObjPtr);
                return JIM_ERROR;
            }
            if (SetVariableFromAny(interp, objPtr) != JIM_OK)
                break;
            varPtr = objPtr->internalRep.varValue.varPtr;
            if (varPtr->linkFramePtr != targetCallFrame)
                break;
            objPtr = varPtr->objPtr;
        }
    }
    // Perform the binding
    Jim_SetVariable(interp, nameObjPtr, targetNameObjPtr, 0);
    // We are now sure 'nameObjPtr' type is variableObjType
    nameObjPtr->internalRep.varValue.varPtr->linkFramePtr = targetCallFrame;
    Jim_DecrRefCount(interp, targetNameObjPtr);
    return JIM_OK;
}

// Return the Jim_Obj pointer associated with a variable name, or NULL if the variable was not found in the current context.
// The same optimization discussed in the comment to the 'SetVariable' function should apply here.
//
// If JIM_UNSHARED is set and the variable is an array element (dict sugar) in a dictionary which is shared, the array variable value is duplicated first.
// This allows the array element to be updated (e.g. append, lappend) without affecting other references to the dictionary.
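/* Usage sketch (illustrative, not part of the original source): reading a variable with an
   error message left in the interp result on failure:

       Jim_Obj *valObjPtr = Jim_GetVariable(interp, nameObjPtr, JIM_ERRMSG);
       if (valObjPtr == NULL)
           return JIM_ERROR; // "can't read ...: no such variable" has already been set
*/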
__device__ Jim_Obj *Jim_GetVariable(Jim_Interp *interp, Jim_Obj *nameObjPtr, int flags)
{
    Jim_CallFrame *savedFramePtr;
    int global = (flags & JIMGLOBAL_);
    if (global) {
        savedFramePtr = interp->framePtr;
        interp->framePtr = interp->topFramePtr;
    }
    flags &= ~JIMGLOBAL_;
    switch (SetVariableFromAny(interp, nameObjPtr)) {
    case JIM_OK: {
        Jim_Var *varPtr = nameObjPtr->internalRep.varValue.varPtr;
        if (varPtr->linkFramePtr == NULL) {
            if (global)
                interp->framePtr = savedFramePtr;
            return varPtr->objPtr;
        }
        else {
            Jim_Obj *objPtr;
            // The variable is a link? Resolve it.
            Jim_CallFrame *savedCallFrame = interp->framePtr;
            interp->framePtr = varPtr->linkFramePtr;
            objPtr = Jim_GetVariable(interp, varPtr->objPtr, flags);
            interp->framePtr = savedCallFrame;
            if (objPtr) {
                if (global)
                    interp->framePtr = savedFramePtr;
                return objPtr;
            }
            // Error, so fall through to the error message
        }
        break;
    }
    case JIM_DICT_SUGAR:
        // [dict] syntax sugar
        if (global)
            interp->framePtr = savedFramePtr;
        return JimDictSugarGet(interp, nameObjPtr, flags);
    }
    if (flags & JIM_ERRMSG)
        Jim_SetResultFormatted(interp, "can't read \"%#s\": no such variable", nameObjPtr);
    if (global)
        interp->framePtr = savedFramePtr;
    return NULL;
}

__device__ Jim_Obj *Jim_GetVariableStr(Jim_Interp *interp, const char *name, int flags)
{
    Jim_Obj *nameObjPtr = Jim_NewStringObj(interp, name, -1);
    Jim_IncrRefCount(nameObjPtr);
    Jim_Obj *varObjPtr = Jim_GetVariable(interp, nameObjPtr, flags);
    Jim_DecrRefCount(interp, nameObjPtr);
    return varObjPtr;
}

// Unset a variable. Note: On success, unset invalidates all the variable objects created in the current call frame by incrementing the call frame id.
__device__ int Jim_UnsetVariable(Jim_Interp *interp, Jim_Obj *nameObjPtr, int flags)
{
    Jim_CallFrame *savedFramePtr;
    int global = (flags & JIMGLOBAL_);
    if (global) {
        savedFramePtr = interp->framePtr;
        interp->framePtr = interp->topFramePtr;
    }
    flags &= ~JIMGLOBAL_;
    int retval = SetVariableFromAny(interp, nameObjPtr);
    if (retval == JIM_DICT_SUGAR) {
        // [dict] syntax sugar.
        if (global)
            interp->framePtr = savedFramePtr;
        return JimDictSugarSet(interp, nameObjPtr, NULL);
    }
    else if (retval == JIM_OK) {
        Jim_Var *varPtr = nameObjPtr->internalRep.varValue.varPtr;
        // If it's a link call UnsetVariable recursively
        Jim_CallFrame *framePtr;
        if (varPtr->linkFramePtr) {
            framePtr = interp->framePtr;
            interp->framePtr = varPtr->linkFramePtr;
            retval = Jim_UnsetVariable(interp, varPtr->objPtr, JIM_NONE);
            interp->framePtr = framePtr;
        }
        else {
            const char *name = Jim_String(nameObjPtr);
            if (nameObjPtr->internalRep.varValue.global) {
                name += 2;
                framePtr = interp->topFramePtr;
            }
            else framePtr = interp->framePtr;
            retval = Jim_DeleteHashEntry(&framePtr->vars, name);
            // Change the callframe id, invalidating var lookup caching
            if (retval == JIM_OK)
                framePtr->id = interp->callFrameEpoch++;
        }
    }
    if (retval != JIM_OK && (flags & JIM_ERRMSG))
        Jim_SetResultFormatted(interp, "can't unset \"%#s\": no such variable", nameObjPtr);
    if (global)
        interp->framePtr = savedFramePtr;
    return retval;
}

// ---------- Dict syntax sugar (similar to array Tcl syntax) --------------

// Given a variable name for [dict] operation syntax sugar, this function returns two objects, the first with the name
// of the variable to set, and the second with the respective key. For example "foo(bar)" will return objects with string repr. of "foo" and "bar".
// The returned objects have refcount = 1. The function can't fail.
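/* Worked example (sketch): for the dict-sugar name "a(3)", the parser below yields variable
   "a" and key "3"; at lookup time (JimDictSugarGet / JimExpandDictSugar further below), after
   "set a {1 one 3 three}" the expression $a(3) therefore resolves to "three", while an unknown
   key produces the "no such element in array" message built by the helpers that follow. */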
static __device__ void JimDictSugarParseVarKey(Jim_Interp *interp, Jim_Obj *objPtr, Jim_Obj **varPtrPtr, Jim_Obj **keyPtrPtr) { int len; const char *str = Jim_GetString(objPtr, &len); const char *p = strchr(str, '('); JimPanic(p == NULL, "JimDictSugarParseVarKey() called for non-dict-sugar (%s)", str); Jim_Obj *varObjPtr = Jim_NewStringObj(interp, str, (int)(p - str)); p++; int keyLen = (int)(str + len - p); if (str[len - 1] == ')') keyLen--; // Create the objects with the variable name and key. Jim_Obj *keyObjPtr = Jim_NewStringObj(interp, p, keyLen); Jim_IncrRefCount(varObjPtr); Jim_IncrRefCount(keyObjPtr); *varPtrPtr = varObjPtr; *keyPtrPtr = keyObjPtr; } // Helper of Jim_SetVariable() to deal with dict-syntax variable names. Also used by Jim_UnsetVariable() with valObjPtr = NULL. static __device__ int JimDictSugarSet(Jim_Interp *interp, Jim_Obj *objPtr, Jim_Obj *valObjPtr) { SetDictSubstFromAny(interp, objPtr); int err = Jim_SetDictKeysVector(interp, objPtr->internalRep.dictSubstValue.varNameObjPtr, &objPtr->internalRep.dictSubstValue.indexObjPtr, 1, valObjPtr, JIM_MUSTEXIST); if (err == JIM_OK) // Don't keep an extra ref to the result Jim_ResetResult(interp); else { // Better error message for unset a(2) where a exists but a(2) doesn't if (!valObjPtr) { if (Jim_GetVariable(interp, objPtr->internalRep.dictSubstValue.varNameObjPtr, JIM_NONE)) { Jim_SetResultFormatted(interp, "can't unset \"%#s\": no such element in array", objPtr); return err; } } // Make the error more informative and Tcl-compatible Jim_SetResultFormatted(interp, "can't %s \"%#s\": variable isn't array", (valObjPtr ? "set" : "unset"), objPtr); } return err; } // Expands the array variable (dict sugar) and returns the result, or NULL on error. // If JIM_UNSHARED is set and the dictionary is shared, it will be duplicated and stored back to the variable before expansion. static __device__ Jim_Obj *JimDictExpandArrayVariable(Jim_Interp *interp, Jim_Obj *varObjPtr, Jim_Obj *keyObjPtr, int flags) { Jim_Obj *dictObjPtr = Jim_GetVariable(interp, varObjPtr, JIM_ERRMSG); if (!dictObjPtr) return NULL; Jim_Obj *resObjPtr = NULL; int ret = Jim_DictKey(interp, dictObjPtr, keyObjPtr, &resObjPtr, JIM_NONE); if (ret != JIM_OK) Jim_SetResultFormatted(interp, "can't read \"%#s(%#s)\": %s array", varObjPtr, keyObjPtr, ret < 0 ? 
"variable isn't" : "no such element in"); // Update the variable to have an unshared copy else if ((flags & JIM_UNSHARED) && Jim_IsShared(dictObjPtr)) Jim_SetVariable(interp, varObjPtr, Jim_DuplicateObj(interp, dictObjPtr)); return resObjPtr; } // Helper of Jim_GetVariable() to deal with dict-syntax variable names static __device__ Jim_Obj *JimDictSugarGet(Jim_Interp *interp, Jim_Obj *objPtr, int flags) { SetDictSubstFromAny(interp, objPtr); return JimDictExpandArrayVariable(interp, objPtr->internalRep.dictSubstValue.varNameObjPtr, objPtr->internalRep.dictSubstValue.indexObjPtr, flags); } // --------- $var(INDEX) substitution, using a specialized object ----------- __device__ void FreeDictSubstInternalRep(Jim_Interp *interp, Jim_Obj *objPtr) { Jim_DecrRefCount(interp, objPtr->internalRep.dictSubstValue.varNameObjPtr); Jim_DecrRefCount(interp, objPtr->internalRep.dictSubstValue.indexObjPtr); } __device__ void DupDictSubstInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr) { JIM_NOTUSED(interp); dupPtr->internalRep.dictSubstValue.varNameObjPtr = srcPtr->internalRep.dictSubstValue.varNameObjPtr; dupPtr->internalRep.dictSubstValue.indexObjPtr = srcPtr->internalRep.dictSubstValue.indexObjPtr; dupPtr->typePtr = &_dictSubstObjType; } // Note: The object *must* be in dict-sugar format static __device__ void SetDictSubstFromAny(Jim_Interp *interp, Jim_Obj *objPtr) { if (objPtr->typePtr != &_dictSubstObjType) { Jim_Obj *varObjPtr, *keyObjPtr; if (objPtr->typePtr == &_interpolatedObjType) { // An interpolated object in dict-sugar form varObjPtr = objPtr->internalRep.dictSubstValue.varNameObjPtr; keyObjPtr = objPtr->internalRep.dictSubstValue.indexObjPtr; Jim_IncrRefCount(varObjPtr); Jim_IncrRefCount(keyObjPtr); } else JimDictSugarParseVarKey(interp, objPtr, &varObjPtr, &keyObjPtr); Jim_FreeIntRep(interp, objPtr); objPtr->typePtr = &_dictSubstObjType; objPtr->internalRep.dictSubstValue.varNameObjPtr = varObjPtr; objPtr->internalRep.dictSubstValue.indexObjPtr = keyObjPtr; } } // This function is used to expand [dict get] sugar in the form of $var(INDEX). The function is mainly used by Jim_EvalObj() // to deal with tokens of type JIM_TT_DICTSUGAR. objPtr points to an object that is *guaranteed* to be in the form VARNAME(INDEX). // The 'index' part is [subst]ituted, and is used to lookup a key inside the [dict]ionary contained in variable VARNAME. 
static __device__ Jim_Obj *JimExpandDictSugar(Jim_Interp *interp, Jim_Obj *objPtr) { Jim_Obj *substKeyObjPtr = NULL; SetDictSubstFromAny(interp, objPtr); if (Jim_SubstObj(interp, objPtr->internalRep.dictSubstValue.indexObjPtr, &substKeyObjPtr, JIM_NONE) != JIM_OK) return NULL; Jim_IncrRefCount(substKeyObjPtr); Jim_Obj *resObjPtr = JimDictExpandArrayVariable(interp, objPtr->internalRep.dictSubstValue.varNameObjPtr, substKeyObjPtr, 0); Jim_DecrRefCount(interp, substKeyObjPtr); return resObjPtr; } static __device__ Jim_Obj *JimExpandExprSugar(Jim_Interp *interp, Jim_Obj *objPtr) { Jim_Obj *resultObjPtr; if (Jim_EvalExpression(interp, objPtr, &resultObjPtr) == JIM_OK) { // Note that the result has a ref count of 1, but we need a ref count of 0 resultObjPtr->refCount--; return resultObjPtr; } return NULL; } #pragma endregion // ----------------------------------------------------------------------------- // CallFrame // ----------------------------------------------------------------------------- #pragma region CallFrame static __device__ Jim_CallFrame *JimCreateCallFrame(Jim_Interp *interp, Jim_CallFrame *parent, Jim_Obj *nsObj) { Jim_CallFrame *cf; if (interp->freeFramesList) { cf = interp->freeFramesList; interp->freeFramesList = cf->next; cf->argv = NULL; cf->argc = 0; cf->procArgsObjPtr = NULL; cf->procBodyObjPtr = NULL; cf->next = NULL; cf->staticVars = NULL; cf->localCommands = NULL; cf->tailcallObj = NULL; cf->tailcallCmd = NULL; } else { cf = (Jim_CallFrame *)Jim_Alloc(sizeof(*cf)); memset(cf, 0, sizeof(*cf)); Jim_InitHashTable(&cf->vars, &JimVariablesHashTableType, interp); } cf->id = interp->callFrameEpoch++; cf->parent = parent; cf->level = (parent ? parent->level + 1 : 0); cf->nsObj = nsObj; Jim_IncrRefCount(nsObj); return cf; } static __device__ int JimDeleteLocalProcs(Jim_Interp *interp, Jim_Stack *localCommands) { // Delete any local procs if (localCommands) { Jim_Obj *cmdNameObj; while ((cmdNameObj = (Jim_Obj *)Jim_StackPop(localCommands)) != NULL) { Jim_HashTable *ht = &interp->commands; Jim_Obj *fqObjName; const char *fqname = JimQualifyName(interp, Jim_String(cmdNameObj), &fqObjName); Jim_HashEntry *he = Jim_FindHashEntry(ht, fqname); if (he) { Jim_Cmd *cmd = (Jim_Cmd *)Jim_GetHashEntryVal(he); if (cmd->prevCmd) { Jim_Cmd *prevCmd = cmd->prevCmd; cmd->prevCmd = NULL; // Delete the old command JimDecrCmdRefCount(interp, cmd); // And restore the original Jim_SetHashVal(ht, he, prevCmd); } else { Jim_DeleteHashEntry(ht, fqname); Jim_InterpIncrProcEpoch(interp); } } Jim_DecrRefCount(interp, cmdNameObj); JimFreeQualifiedName(interp, fqObjName); } Jim_FreeStack(localCommands); Jim_Free(localCommands); } return JIM_OK; } #define JIM_FCF_FULL 0 // Always free the vars hash table #define JIM_FCF_REUSE 1 // Reuse the vars hash table if possible static __device__ void JimFreeCallFrame(Jim_Interp *interp, Jim_CallFrame *cf, int action) { JimDeleteLocalProcs(interp, cf->localCommands); if (cf->procArgsObjPtr) Jim_DecrRefCount(interp, cf->procArgsObjPtr); if (cf->procBodyObjPtr) Jim_DecrRefCount(interp, cf->procBodyObjPtr); Jim_DecrRefCount(interp, cf->nsObj); if (action == JIM_FCF_FULL || cf->vars.size != JIM_HT_INITIAL_SIZE) Jim_FreeHashTable(&cf->vars); else { Jim_HashEntry **table = cf->vars.table; for (int i = 0; i < JIM_HT_INITIAL_SIZE; i++) { Jim_HashEntry *he = table[i]; while (he != NULL) { Jim_HashEntry *nextEntry = he->next; Jim_Var *varPtr = (Jim_Var *)Jim_GetHashEntryVal(he); Jim_DecrRefCount(interp, varPtr->objPtr); Jim_Free(Jim_GetHashEntryKey(he)); Jim_Free(varPtr); 
                Jim_Free(he);
                table[i] = NULL;
                he = nextEntry;
            }
        }
        cf->vars.used = 0;
    }
    cf->next = interp->freeFramesList;
    interp->freeFramesList = cf;
}

#pragma endregion

// -----------------------------------------------------------------------------
// References
// -----------------------------------------------------------------------------
#pragma region References

#ifdef JIM_REFERENCES

// References HashTable Type.
// Keys are unsigned long integers, dynamically allocated for now but in the future it may be worth caching these 4-byte objects. Values are pointers to Jim_References.
static __device__ void JimReferencesHTValDestructor(void *interp, void *val)
{
    Jim_Reference *refPtr = (Jim_Reference *)val;
    Jim_DecrRefCount((Jim_Interp *)interp, refPtr->objPtr);
    if (refPtr->finalizerCmdNamePtr != NULL)
        Jim_DecrRefCount((Jim_Interp *)interp, refPtr->finalizerCmdNamePtr);
    Jim_Free(val);
}

static __device__ unsigned int JimReferencesHTHashFunction(const void *key)
{
    // Only the least significant bits are used.
    const unsigned long *widePtr = (const unsigned long *)key;
    unsigned int intValue = (unsigned int)*widePtr;
    return Jim_IntHashFunction(intValue);
}

static __device__ void *JimReferencesHTKeyDup(void *privdata, const void *key)
{
    JIM_NOTUSED(privdata);
    void *copy = Jim_Alloc(sizeof(unsigned long));
    memcpy(copy, key, sizeof(unsigned long));
    return copy;
}

static __device__ int JimReferencesHTKeyCompare(void *privdata, const void *key1, const void *key2)
{
    JIM_NOTUSED(privdata);
    return memcmp(key1, key2, sizeof(unsigned long)) == 0;
}

static __device__ void JimReferencesHTKeyDestructor(void *privdata, void *key)
{
    JIM_NOTUSED(privdata);
    Jim_Free(key);
}

__constant__ static const Jim_HashTableType JimReferencesHashTableType = {
    JimReferencesHTHashFunction, // hash function
    JimReferencesHTKeyDup, // key dup
    NULL, // val dup
    JimReferencesHTKeyCompare, // key compare
    JimReferencesHTKeyDestructor, // key destructor
    JimReferencesHTValDestructor // val destructor
};

// -----------------------------------------------------------------------------
// Reference object type and References API
// -----------------------------------------------------------------------------

// The string representation of references has two features in order to make the GC faster. The first is that every reference starts
// with an uncommon character '<', in order to make the string matching faster. The second is that the reference string rep is 42 characters
// in length; this means that it is not necessary to check any object with a string repr < 42, and usually there aren't many of these objects.
#define JIM_REFERENCE_SPACE (35+JIM_REFERENCE_TAGLEN)

static __device__ int JimFormatReference(char *buf, Jim_Reference *refPtr, unsigned long id)
{
    const char *fmt = "<reference.<%s>.%020lu>";
    sprintf(buf, fmt, refPtr->tag, id);
    return JIM_REFERENCE_SPACE;
}

static __device__ void UpdateStringOfReference(struct Jim_Obj *objPtr);

__constant__ static const Jim_ObjType _referenceObjType = {
    "reference",
    NULL,
    NULL,
    UpdateStringOfReference,
    JIM_TYPE_REFERENCES,
};

static __device__ void UpdateStringOfReference(struct Jim_Obj *objPtr)
{
    char buf[JIM_REFERENCE_SPACE + 1];
    JimFormatReference(buf, objPtr->internalRep.refValue.refPtr, objPtr->internalRep.refValue.id);
    JimSetStringBytes(objPtr, buf);
}

// Returns true if 'c' is a valid reference tag character, i.e. inside the range [_a-zA-Z0-9]
// inside the range [_a-zA-Z0-9].
static __device__ int isrefchar(int c)
{
    return (c == '_' || isalnum(c));
}

static __device__ int SetReferenceFromAny(Jim_Interp *interp, Jim_Obj *objPtr)
{
    // Get the string representation
    int len;
    const char *str = Jim_GetString(objPtr, &len);
    // Check if it looks like a reference
    if (len < JIM_REFERENCE_SPACE) goto badformat;
    // Trim spaces
    const char *start = str;
    const char *end = str + len - 1;
    while (*start == ' ') start++;
    while (*end == ' ' && end > start) end--;
    if (end - start + 1 != JIM_REFERENCE_SPACE) goto badformat;
    // <reference.<1234567>.%020>
    if (memcmp(start, "<reference.<", 12) != 0) goto badformat;
    if (start[12 + JIM_REFERENCE_TAGLEN] != '>' || end[0] != '>') goto badformat;
    // The tag can't contain chars other than a-zA-Z0-9 + '_'.
    for (int i = 0; i < JIM_REFERENCE_TAGLEN; i++)
        if (!isrefchar(start[12 + i])) goto badformat;
    // Extract info from the reference.
    char refId[21];
    memcpy(refId, start + 14 + JIM_REFERENCE_TAGLEN, 20);
    refId[20] = '\0';
    // Try to convert the ID into an unsigned long
    char *endptr;
    unsigned long value = strtoul(refId, &endptr, 10);
    if (JimCheckConversion(refId, endptr) != JIM_OK) goto badformat;
    // Check if the reference really exists!
    Jim_HashEntry *he = Jim_FindHashEntry(&interp->references, &value);
    if (he == NULL) {
        Jim_SetResultFormatted(interp, "invalid reference id \"%#s\"", objPtr);
        return JIM_ERROR;
    }
    Jim_Reference *refPtr = (Jim_Reference *)Jim_GetHashEntryVal(he);
    // Free the old internal repr and set the new one.
    Jim_FreeIntRep(interp, objPtr);
    objPtr->typePtr = &_referenceObjType;
    objPtr->internalRep.refValue.id = value;
    objPtr->internalRep.refValue.refPtr = refPtr;
    return JIM_OK;
badformat:
    Jim_SetResultFormatted(interp, "expected reference but got \"%#s\"", objPtr);
    return JIM_ERROR;
}

// Returns a new reference pointing to objPtr, having cmdNamePtr as finalizer command (or NULL if there is no finalizer). The returned reference object has refcount = 0.
__device__ Jim_Obj *Jim_NewReference(Jim_Interp *interp, Jim_Obj *objPtr, Jim_Obj *tagPtr, Jim_Obj *cmdNamePtr)
{
    // Perform the Garbage Collection if needed.
    Jim_CollectIfNeeded(interp);
    struct Jim_Reference *refPtr = (struct Jim_Reference *)Jim_Alloc(sizeof(*refPtr));
    refPtr->objPtr = objPtr;
    Jim_IncrRefCount(objPtr);
    refPtr->finalizerCmdNamePtr = cmdNamePtr;
    if (cmdNamePtr) Jim_IncrRefCount(cmdNamePtr);
    unsigned long id = interp->referenceNextId++;
    Jim_AddHashEntry(&interp->references, &id, refPtr);
    Jim_Obj *refObjPtr = Jim_NewObj(interp);
    refObjPtr->typePtr = &_referenceObjType;
    refObjPtr->bytes = NULL;
    refObjPtr->internalRep.refValue.id = id;
    refObjPtr->internalRep.refValue.refPtr = refPtr;
    interp->referenceNextId++;
    // Set the tag. Trimmed at JIM_REFERENCE_TAGLEN.
    // Everything that does not pass the 'isrefchar' test is replaced with '_'.
    int tagLen;
    const char *tag = Jim_GetString(tagPtr, &tagLen);
    if (tagLen > JIM_REFERENCE_TAGLEN) tagLen = JIM_REFERENCE_TAGLEN;
    for (int i = 0; i < JIM_REFERENCE_TAGLEN; i++)
        if (i < tagLen && isrefchar(tag[i])) refPtr->tag[i] = tag[i];
        else refPtr->tag[i] = '_';
    refPtr->tag[JIM_REFERENCE_TAGLEN] = '\0';
    return refObjPtr;
}

__device__ Jim_Reference *Jim_GetReference(Jim_Interp *interp, Jim_Obj *objPtr)
{
    if (objPtr->typePtr != &_referenceObjType && SetReferenceFromAny(interp, objPtr) == JIM_ERROR) return NULL;
    return objPtr->internalRep.refValue.refPtr;
}

__device__ int Jim_SetFinalizer(Jim_Interp *interp, Jim_Obj *objPtr, Jim_Obj *cmdNamePtr)
{
    Jim_Reference *refPtr;
    if ((refPtr = Jim_GetReference(interp, objPtr)) == NULL) return JIM_ERROR;
    Jim_IncrRefCount(cmdNamePtr);
    if (refPtr->finalizerCmdNamePtr) Jim_DecrRefCount(interp, refPtr->finalizerCmdNamePtr);
    refPtr->finalizerCmdNamePtr = cmdNamePtr;
    return JIM_OK;
}

__device__ int Jim_GetFinalizer(Jim_Interp *interp, Jim_Obj *objPtr, Jim_Obj **cmdNamePtrPtr)
{
    Jim_Reference *refPtr;
    if ((refPtr = Jim_GetReference(interp, objPtr)) == NULL) return JIM_ERROR;
    *cmdNamePtrPtr = refPtr->finalizerCmdNamePtr;
    return JIM_OK;
}

// -----------------------------------------------------------------------------
// References Garbage Collection
// -----------------------------------------------------------------------------
// This is the hash table type for the "MARK" phase of the GC
__constant__ static const Jim_HashTableType JimRefMarkHashTableType = {
    JimReferencesHTHashFunction, // hash function
    JimReferencesHTKeyDup, // key dup
    NULL, // val dup
    JimReferencesHTKeyCompare, // key compare
    JimReferencesHTKeyDestructor, // key destructor
    NULL // val destructor
};

// Performs the garbage collection
__device__ int Jim_Collect(Jim_Interp *interp)
{
    int collected = 0;
#ifndef JIM_BOOTSTRAP
    // Avoid recursive calls
    if (interp->lastCollectId == -1) return 0; // Jim_Collect() already running. Return just now.
    interp->lastCollectId = -1;
    // Mark all the references found into the 'mark' hash table. The references are searched in every live object that is of a type that can contain references.
    Jim_HashTable marks;
    Jim_InitHashTable(&marks, &JimRefMarkHashTableType, NULL);
    Jim_Obj *objPtr = interp->liveList;
    while (objPtr) {
        if (objPtr->typePtr == NULL || objPtr->typePtr->flags & JIM_TYPE_REFERENCES) {
            // If the object is of type reference, getting the id is simple...
            if (objPtr->typePtr == &_referenceObjType) {
                Jim_AddHashEntry(&marks, &objPtr->internalRep.refValue.id, NULL);
#ifdef JIM_DEBUG_GC
                printf("MARK (reference): %d refcount: %d\n", (int)objPtr->internalRep.refValue.id, objPtr->refCount);
#endif
                objPtr = objPtr->nextObjPtr;
                continue;
            }
            // Get the string repr of the object we want to scan for references.
            const char *str, *p;
            int len;
            p = str = Jim_GetString(objPtr, &len);
            // Skip objects too small to contain references.
            if (len < JIM_REFERENCE_SPACE) {
                objPtr = objPtr->nextObjPtr;
                continue;
            }
            // Extract references from the object string repr.
            while (1) {
                if ((p = strstr(p, "<reference.<")) == NULL) break;
                // Check if it's a valid reference.
                if (len - (p - str) < JIM_REFERENCE_SPACE) break;
                if (p[41] != '>' || p[19] != '>' || p[20] != '.') break;
                for (int i = 21; i <= 40; i++)
                    if (!isdigit(p[i])) break;
                // Get the ID
                unsigned long id = strtoul(p + 21, NULL, 10);
                // Ok, a reference for the given ID was found. Mark it.
Jim_AddHashEntry(&marks, &id, NULL); #ifdef JIM_DEBUG_GC printf("MARK: %d\n", (int)id); #endif p += JIM_REFERENCE_SPACE; } } objPtr = objPtr->nextObjPtr; } // Run the references hash table to destroy every reference that is not referenced outside (not present in the mark HT). Jim_HashEntry *he; Jim_HashTableIterator htiter; JimInitHashTableIterator(&interp->references, &htiter); while ((he = Jim_NextHashEntry(&htiter)) != NULL) { const unsigned long *refId = (const unsigned long *)he->key; // Check if in the mark phase we encountered this reference. if (Jim_FindHashEntry(&marks, refId) == NULL) { #ifdef JIM_DEBUG_GC printf("COLLECTING %d\n", (int)*refId); #endif collected++; // Drop the reference, but call the finalizer first if registered. Jim_Reference *refPtr = (Jim_Reference *)Jim_GetHashEntryVal(he); if (refPtr->finalizerCmdNamePtr) { char *refstr = (char *)Jim_Alloc(JIM_REFERENCE_SPACE + 1); Jim_Obj *objv[3], *oldResult; JimFormatReference(refstr, refPtr, *refId); objv[0] = refPtr->finalizerCmdNamePtr; objv[1] = Jim_NewStringObjNoAlloc(interp, refstr, JIM_REFERENCE_SPACE); objv[2] = refPtr->objPtr; // Drop the reference itself. Avoid the finaliser being freed here Jim_IncrRefCount(objv[0]); // Don't remove the reference from the hash table just yet since that will free refPtr, and hence refPtr->objPtr // Call the finalizer. Errors ignored. (should we use bgerror?) oldResult = interp->result; Jim_IncrRefCount(oldResult); Jim_EvalObjVector(interp, 3, objv); Jim_SetResult(interp, oldResult); Jim_DecrRefCount(interp, oldResult); Jim_DecrRefCount(interp, objv[0]); } Jim_DeleteHashEntry(&interp->references, refId); } } Jim_FreeHashTable(&marks); interp->lastCollectId = interp->referenceNextId; interp->lastCollectTime = time(NULL); #endif // JIM_BOOTSTRAP return collected; } #define JIM_COLLECT_ID_PERIOD 5000 #define JIM_COLLECT_TIME_PERIOD 300 __device__ void Jim_CollectIfNeeded(Jim_Interp *interp) { unsigned long elapsedId = interp->referenceNextId - interp->lastCollectId; int elapsedTime = (int)(time(NULL) - interp->lastCollectTime); if (elapsedId > JIM_COLLECT_ID_PERIOD || elapsedTime > JIM_COLLECT_TIME_PERIOD) Jim_Collect(interp); } #endif #pragma endregion __device__ int Jim_IsBigEndian(void) { union { unsigned short s; unsigned char c[2]; } uval = {0x0102}; return uval.c[0] == 1; } // ----------------------------------------------------------------------------- // Interpreter related functions // ----------------------------------------------------------------------------- #pragma region Interpreter related functions __device__ Jim_Interp *Jim_CreateInterp() { Jim_Interp *i = (Jim_Interp *)Jim_Alloc(sizeof(*i)); memset(i, 0, sizeof(*i)); i->maxCallFrameDepth = JIM_MAX_CALLFRAME_DEPTH; i->maxEvalDepth = JIM_MAX_EVAL_DEPTH; i->lastCollectTime = time(NULL); // Note that we can create objects only after the interpreter liveList and freeList pointers are initialized to NULL. 
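// (Ordering note: Jim_NewObj() links every new object into i->liveList, so the memset()
// above must have zeroed liveList and freeList before the first object, i->emptyObj, is
// created below.)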
Jim_InitHashTable(&i->commands, &JimCommandsHashTableType, i); #ifdef JIM_REFERENCES Jim_InitHashTable(&i->references, &JimReferencesHashTableType, i); #endif Jim_InitHashTable(&i->assocData, &JimAssocDataHashTableType, i); Jim_InitHashTable(&i->packages, &JimPackageHashTableType, NULL); i->emptyObj = Jim_NewEmptyStringObj(i); i->trueObj = Jim_NewIntObj(i, 1); i->falseObj = Jim_NewIntObj(i, 0); i->framePtr = i->topFramePtr = JimCreateCallFrame(i, NULL, i->emptyObj); i->errorFileNameObj = i->emptyObj; i->result = i->emptyObj; i->stackTrace = Jim_NewListObj(i, NULL, 0); i->unknown = Jim_NewStringObj(i, "unknown", -1); i->errorProc = i->emptyObj; i->currentScriptObj = Jim_NewEmptyStringObj(i); i->nullScriptObj = Jim_NewEmptyStringObj(i); Jim_IncrRefCount(i->emptyObj); Jim_IncrRefCount(i->errorFileNameObj); Jim_IncrRefCount(i->result); Jim_IncrRefCount(i->stackTrace); Jim_IncrRefCount(i->unknown); Jim_IncrRefCount(i->currentScriptObj); Jim_IncrRefCount(i->nullScriptObj); Jim_IncrRefCount(i->errorProc); Jim_IncrRefCount(i->trueObj); Jim_IncrRefCount(i->falseObj); // Initialize key variables every interpreter should contain Jim_SetVariableStrWithStr(i, JIM_LIBPATH, TCL_LIBRARY); Jim_SetVariableStrWithStr(i, JIM_INTERACTIVE, "0"); Jim_SetVariableStrWithStr(i, "tcl_platform(os)", TCL_PLATFORM_OS); Jim_SetVariableStrWithStr(i, "tcl_platform(platform)", TCL_PLATFORM_PLATFORM); Jim_SetVariableStrWithStr(i, "tcl_platform(pathSeparator)", TCL_PLATFORM_PATH_SEPARATOR); Jim_SetVariableStrWithStr(i, "tcl_platform(byteOrder)", Jim_IsBigEndian() ? "bigEndian" : "littleEndian"); Jim_SetVariableStrWithStr(i, "tcl_platform(threaded)", "0"); Jim_SetVariableStr(i, "tcl_platform(pointerSize)", Jim_NewIntObj(i, sizeof(void *))); Jim_SetVariableStr(i, "tcl_platform(wordSize)", Jim_NewIntObj(i, sizeof(jim_wide))); return i; } __device__ void Jim_FreeInterp(Jim_Interp *i) { // Free the active call frames list - must be done before i->commands is destroyed Jim_CallFrame *cf, *cfx; for (cf = i->framePtr; cf; cf = cfx) { cfx = cf->parent; JimFreeCallFrame(i, cf, JIM_FCF_FULL); } Jim_DecrRefCount(i, i->emptyObj); Jim_DecrRefCount(i, i->trueObj); Jim_DecrRefCount(i, i->falseObj); Jim_DecrRefCount(i, i->result); Jim_DecrRefCount(i, i->stackTrace); Jim_DecrRefCount(i, i->errorProc); Jim_DecrRefCount(i, i->unknown); Jim_DecrRefCount(i, i->errorFileNameObj); Jim_DecrRefCount(i, i->currentScriptObj); Jim_DecrRefCount(i, i->nullScriptObj); Jim_FreeHashTable(&i->commands); #ifdef JIM_REFERENCES Jim_FreeHashTable(&i->references); #endif Jim_FreeHashTable(&i->packages); Jim_Free(i->prngState); Jim_FreeHashTable(&i->assocData); // Check that the live object list is empty, otherwise there is a memory leak. Jim_Obj *objPtr, *nextObjPtr; #ifdef JIM_MAINTAINER if (i->liveList != NULL) { objPtr = i->liveList; printf("\n-------------------------------------\n"); printf("Objects still in the free list:\n"); while (objPtr) { const char *type = (objPtr->typePtr ? objPtr->typePtr->name : "string"); if (objPtr->bytes && strlen(objPtr->bytes) > 20) printf("%p (%d) %-10s: '%.20s...'\n", (void *)objPtr, objPtr->refCount, type, objPtr->bytes); else printf("%p (%d) %-10s: '%s'\n", (void *)objPtr, objPtr->refCount, type, objPtr->bytes ? 
objPtr->bytes : "(null)"); if (objPtr->typePtr == &_sourceObjType) printf("FILE %s LINE %d\n", Jim_String(objPtr->internalRep.sourceValue.fileNameObj), objPtr->internalRep.sourceValue.lineNumber); objPtr = objPtr->nextObjPtr; } printf("-------------------------------------\n\n"); JimPanic(1, "Live list non empty freeing the interpreter! Leak?"); } #endif // Free all the freed objects. objPtr = i->freeList; while (objPtr) { nextObjPtr = objPtr->nextObjPtr; Jim_Free(objPtr); objPtr = nextObjPtr; } // Free the free call frames list for (cf = i->freeFramesList; cf; cf = cfx) { cfx = cf->next; if (cf->vars.table) Jim_FreeHashTable(&cf->vars); Jim_Free(cf); } // Free the interpreter structure. Jim_Free(i); } // Returns the call frame relative to the level represented by levelObjPtr. If levelObjPtr == NULL, the level is assumed to be '1'. // This function accepts the 'level' argument in the form of the commands [uplevel] and [upvar]. // Returns NULL on error. // Note: for a function accepting a relative integer as level suitable for implementation of [info level ?level?], see JimGetCallFrameByInteger() __device__ Jim_CallFrame *Jim_GetCallFrameByLevel(Jim_Interp *interp, Jim_Obj *levelObjPtr) { long level; const char *str; if (levelObjPtr) { str = Jim_String(levelObjPtr); if (str[0] == '#') { char *endptr; level = jim_strtol(str + 1, &endptr); if (str[1] == '\0' || endptr[0] != '\0') level = -1; } // Convert from a relative to an absolute level else level = (Jim_GetLong(interp, levelObjPtr, &level) != JIM_OK || level < 0 ? -1 : interp->framePtr->level - level); } else { str = "1"; // Needed to format the error message. level = interp->framePtr->level - 1; } if (level == 0) return interp->topFramePtr; // Lookup if (level > 0) for (Jim_CallFrame *framePtr = interp->framePtr; framePtr; framePtr = framePtr->parent) if (framePtr->level == level) return framePtr; Jim_SetResultFormatted(interp, "bad level \"%s\"", str); return NULL; } // Similar to Jim_GetCallFrameByLevel() but the level is specified as a relative integer like in the [info level ?level?] command. static __device__ Jim_CallFrame *JimGetCallFrameByInteger(Jim_Interp *interp, Jim_Obj *levelObjPtr) { long level; if (Jim_GetLong(interp, levelObjPtr, &level) == JIM_OK) { // Convert from a relative to an absolute level if (level <= 0) level = interp->framePtr->level + level; if (level == 0) return interp->topFramePtr; // Lookup for (Jim_CallFrame *framePtr = interp->framePtr; framePtr; framePtr = framePtr->parent) if (framePtr->level == level) return framePtr; } Jim_SetResultFormatted(interp, "bad level \"%#s\"", levelObjPtr); return NULL; } static __device__ void JimResetStackTrace(Jim_Interp *interp) { Jim_DecrRefCount(interp, interp->stackTrace); interp->stackTrace = Jim_NewListObj(interp, NULL, 0); Jim_IncrRefCount(interp->stackTrace); } static __device__ void JimSetStackTrace(Jim_Interp *interp, Jim_Obj *stackTraceObj) { // Increment reference first in case these are the same object Jim_IncrRefCount(stackTraceObj); Jim_DecrRefCount(interp, interp->stackTrace); interp->stackTrace = stackTraceObj; interp->errorFlag = 1; // This is a bit ugly. If the filename of the last entry of the stack trace is empty, the next stack level should be added. 
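    // (The stack trace is stored as a flat list of {procname filename line} triples,
    // appended by JimAppendStackTrace() below, so len - 2 indexes the filename slot of
    // the last entry.)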
int len = Jim_ListLength(interp, interp->stackTrace); if (len >= 3) if (Jim_Length(Jim_ListGetIndex(interp, interp->stackTrace, len - 2)) == 0) interp->addStackTrace = 1; } static __device__ void JimAppendStackTrace(Jim_Interp *interp, const char *procname, Jim_Obj *fileNameObj, int linenr) { if (!strcmp(procname, "unknown")) procname = ""; if (!*procname && !Jim_Length(fileNameObj)) return; // No useful info here if (Jim_IsShared(interp->stackTrace)) { Jim_DecrRefCount(interp, interp->stackTrace); interp->stackTrace = Jim_DuplicateObj(interp, interp->stackTrace); Jim_IncrRefCount(interp->stackTrace); } // If we have no procname but the previous element did, merge with that frame if (!*procname && Jim_Length(fileNameObj)) { // Just a filename. Check the previous entry int len = Jim_ListLength(interp, interp->stackTrace); if (len >= 3) { Jim_Obj *objPtr = Jim_ListGetIndex(interp, interp->stackTrace, len - 3); if (Jim_Length(objPtr)) { // Yes, the previous level had procname objPtr = Jim_ListGetIndex(interp, interp->stackTrace, len - 2); if (Jim_Length(objPtr) == 0) { // But no filename, so merge the new info with that frame ListSetIndex(interp, interp->stackTrace, len - 2, fileNameObj, 0); ListSetIndex(interp, interp->stackTrace, len - 1, Jim_NewIntObj(interp, linenr), 0); return; } } } } Jim_ListAppendElement(interp, interp->stackTrace, Jim_NewStringObj(interp, procname, -1)); Jim_ListAppendElement(interp, interp->stackTrace, fileNameObj); Jim_ListAppendElement(interp, interp->stackTrace, Jim_NewIntObj(interp, linenr)); } __device__ int Jim_SetAssocData(Jim_Interp *interp, const char *key, Jim_InterpDeleteProc * delProc, void *data) { AssocDataValue *assocEntryPtr = (AssocDataValue *)Jim_Alloc(sizeof(AssocDataValue)); assocEntryPtr->delProc = delProc; assocEntryPtr->data = data; return Jim_AddHashEntry(&interp->assocData, key, assocEntryPtr); } __device__ void *Jim_GetAssocData(Jim_Interp *interp, const char *key) { Jim_HashEntry *entryPtr = Jim_FindHashEntry(&interp->assocData, key); if (entryPtr != NULL) { AssocDataValue *assocEntryPtr = (AssocDataValue *)Jim_GetHashEntryVal(entryPtr); return assocEntryPtr->data; } return NULL; } __device__ int Jim_DeleteAssocData(Jim_Interp *interp, const char *key) { return Jim_DeleteHashEntry(&interp->assocData, key); } __device__ int Jim_GetExitCode(Jim_Interp *interp) { return interp->exitCode; } #pragma endregion // ----------------------------------------------------------------------------- // Integer object // ----------------------------------------------------------------------------- #pragma region Integer object static __device__ void UpdateStringOfInt(struct Jim_Obj *objPtr); static __device__ int SetIntFromAny(Jim_Interp *interp, Jim_Obj *objPtr, int flags); __constant__ static const Jim_ObjType _intObjType = { "int", NULL, NULL, UpdateStringOfInt, JIM_TYPE_NONE, }; // A coerced double is closer to an int than a double. It is an int value temporarily masquerading as a double value. // i.e. it has the same string value as an int and Jim_GetWide() succeeds, but also Jim_GetDouble() returns the value directly. 
__constant__ static const Jim_ObjType _coercedDoubleObjType = { "coerced-double", NULL, NULL, UpdateStringOfInt, JIM_TYPE_NONE, }; static __device__ void UpdateStringOfInt(struct Jim_Obj *objPtr) { char buf[JIM_INTEGER_SPACE + 1]; jim_wide wideValue = JimWideValue(objPtr); int pos = 0; if (wideValue == 0) buf[pos++] = '0'; else { char tmp[JIM_INTEGER_SPACE]; int num = 0; int i; if (wideValue < 0) { buf[pos++] = '-'; i = wideValue % 10; // C89 is implementation defined as to whether (-106 % 10) is -6 or 4, whereas C99 is always -6. coverity[dead_error_line] tmp[num++] = (i > 0 ? (10 - i) : -i); wideValue /= -10; } while (wideValue) { tmp[num++] = wideValue % 10; wideValue /= 10; } for (i = 0; i < num; i++) buf[pos++] = '0' + tmp[num - i - 1]; } buf[pos] = 0; JimSetStringBytes(objPtr, buf); } static __device__ int SetIntFromAny(Jim_Interp *interp, Jim_Obj *objPtr, int flags) { if (objPtr->typePtr == &_coercedDoubleObjType) { // Simple switch objPtr->typePtr = &_intObjType; return JIM_OK; } // Get the string representation const char *str = Jim_String(objPtr); // Try to convert into a jim_wide jim_wide wideValue; if (Jim_StringToWide(str, &wideValue, 0) != JIM_OK) { if (flags & JIM_ERRMSG) Jim_SetResultFormatted(interp, "expected integer but got \"%#s\"", objPtr); return JIM_ERROR; } if ((wideValue == JIM_WIDE_MIN || wideValue == JIM_WIDE_MAX) && errno == ERANGE) { Jim_SetResultString(interp, "Integer value too big to be represented", -1); return JIM_ERROR; } // Free the old internal repr and set the new one. Jim_FreeIntRep(interp, objPtr); objPtr->typePtr = &_intObjType; objPtr->internalRep.wideValue = wideValue; return JIM_OK; } #ifdef JIM_OPTIMIZATION static __device__ int JimIsWide(Jim_Obj *objPtr) { return objPtr->typePtr == &_intObjType; } #endif __device__ int Jim_GetWide(Jim_Interp *interp, Jim_Obj *objPtr, jim_wide *widePtr) { if (objPtr->typePtr != &_intObjType && SetIntFromAny(interp, objPtr, JIM_ERRMSG) == JIM_ERROR) return JIM_ERROR; *widePtr = JimWideValue(objPtr); return JIM_OK; } // Get a wide but does not set an error if the format is bad. 
static __device__ int JimGetWideNoErr(Jim_Interp *interp, Jim_Obj *objPtr, jim_wide * widePtr) { if (objPtr->typePtr != &_intObjType && SetIntFromAny(interp, objPtr, JIM_NONE) == JIM_ERROR) return JIM_ERROR; *widePtr = JimWideValue(objPtr); return JIM_OK; } __device__ int Jim_GetLong(Jim_Interp *interp, Jim_Obj *objPtr, long *longPtr) { jim_wide wideValue; int retval = Jim_GetWide(interp, objPtr, &wideValue); if (retval == JIM_OK) { *longPtr = (long)wideValue; return JIM_OK; } return JIM_ERROR; } __device__ Jim_Obj *Jim_NewIntObj(Jim_Interp *interp, jim_wide wideValue) { Jim_Obj *objPtr = Jim_NewObj(interp); objPtr->typePtr = &_intObjType; objPtr->bytes = NULL; objPtr->internalRep.wideValue = wideValue; return objPtr; } #pragma endregion // ----------------------------------------------------------------------------- // Double object // ----------------------------------------------------------------------------- #pragma region Double object #define JIM_DOUBLE_SPACE 30 static __device__ void UpdateStringOfDouble(struct Jim_Obj *objPtr); static __device__ int SetDoubleFromAny(Jim_Interp *interp, Jim_Obj *objPtr); __constant__ static const Jim_ObjType _doubleObjType = { "double", NULL, NULL, UpdateStringOfDouble, JIM_TYPE_NONE, }; #ifndef HAVE_ISNAN #undef isnan #define isnan(X) ((X) != (X)) #endif #ifndef HAVE_ISINF #undef isinf #define isinf(X) (1.0 / (X) == 0.0) #endif static __device__ void UpdateStringOfDouble(struct Jim_Obj *objPtr) { double value = objPtr->internalRep.doubleValue; if (isnan(value)) { JimSetStringBytes(objPtr, "NaN"); return; } if (isinf(value)) { if (value < 0) JimSetStringBytes(objPtr, "-Inf"); else JimSetStringBytes(objPtr, "Inf"); return; } { char buf[JIM_DOUBLE_SPACE + 1]; int len = sprintf(buf, "%.12g", value); // Add a final ".0" if necessary int i; for (i = 0; i < len; i++) { if (buf[i] == '.' || buf[i] == 'e') { #ifdef JIM_SPRINTF_DOUBLE_NEEDS_FIX // If 'buf' ends in e-0nn or e+0nn, remove the 0 after the + or - and reduce the length by 1 char *e = (char *)strchr(buf, 'e'); if (e && (e[1] == '-' || e[1] == '+') && e[2] == '0') { // Move it up e += 2; memmove(e, e + 1, len - (e - buf)); } #endif break; } } if (buf[i] == '\0') { buf[i++] = '.'; buf[i++] = '0'; buf[i] = '\0'; } JimSetStringBytes(objPtr, buf); } } static __device__ int SetDoubleFromAny(Jim_Interp *interp, Jim_Obj *objPtr) { double doubleValue; jim_wide wideValue; // Preserve the string representation. Needed so we can convert back to int without loss const char *str = Jim_String(objPtr); #ifdef HAVE_LONG_LONG // Assume a 53 bit mantissa #define MIN_INT_IN_DOUBLE -(1LL << 53) #define MAX_INT_IN_DOUBLE -(MIN_INT_IN_DOUBLE + 1) if (objPtr->typePtr == &_intObjType && JimWideValue(objPtr) >= MIN_INT_IN_DOUBLE && JimWideValue(objPtr) <= MAX_INT_IN_DOUBLE) { // Direct conversion to coerced double objPtr->typePtr = &_coercedDoubleObjType; return JIM_OK; } else #endif if (Jim_StringToWide(str, &wideValue, 10) == JIM_OK) { // Managed to convert to an int, so we can use this as a cooerced double Jim_FreeIntRep(interp, objPtr); objPtr->typePtr = &_coercedDoubleObjType; objPtr->internalRep.wideValue = wideValue; return JIM_OK; } else { // Try to convert into a double if (Jim_StringToDouble(str, &doubleValue) != JIM_OK) { Jim_SetResultFormatted(interp, "expected floating-point number but got \"%#s\"", objPtr); return JIM_ERROR; } // Free the old internal repr and set the new one. 
        Jim_FreeIntRep(interp, objPtr);
    }
    objPtr->typePtr = &_doubleObjType;
    objPtr->internalRep.doubleValue = doubleValue;
    return JIM_OK;
}

__device__ int Jim_GetDouble(Jim_Interp *interp, Jim_Obj *objPtr, double *doublePtr)
{
    if (objPtr->typePtr == &_coercedDoubleObjType) {
        *doublePtr = (double)JimWideValue(objPtr);
        return JIM_OK;
    }
    if (objPtr->typePtr != &_doubleObjType && SetDoubleFromAny(interp, objPtr) == JIM_ERROR) return JIM_ERROR;
    *doublePtr = (objPtr->typePtr == &_coercedDoubleObjType ? (double)JimWideValue(objPtr) : objPtr->internalRep.doubleValue);
    return JIM_OK;
}

__device__ Jim_Obj *Jim_NewDoubleObj(Jim_Interp *interp, double doubleValue)
{
    Jim_Obj *objPtr = Jim_NewObj(interp);
    objPtr->typePtr = &_doubleObjType;
    objPtr->bytes = NULL;
    objPtr->internalRep.doubleValue = doubleValue;
    return objPtr;
}

#pragma endregion

// -----------------------------------------------------------------------------
// List object
// -----------------------------------------------------------------------------
#pragma region List object

static __device__ void ListInsertElements(Jim_Obj *listPtr, int idx, int elemc, Jim_Obj *const *elemVec);
static __device__ void ListAppendElement(Jim_Obj *listPtr, Jim_Obj *objPtr);
static __device__ void FreeListInternalRep(Jim_Interp *interp, Jim_Obj *objPtr);
static __device__ void DupListInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr);
static __device__ void UpdateStringOfList(struct Jim_Obj *objPtr);
static __device__ int SetListFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr);

// Note that while the elements of the list may contain references, the list object itself can't. This basically means that the
// list object string representation as a whole can't contain references that are not present in the single elements.
__constant__ static const Jim_ObjType _listObjType = {
    "list",
    FreeListInternalRep,
    DupListInternalRep,
    UpdateStringOfList,
    JIM_TYPE_NONE,
};

__device__ void FreeListInternalRep(Jim_Interp *interp, Jim_Obj *objPtr)
{
    for (int i = 0; i < objPtr->internalRep.listValue.len; i++) Jim_DecrRefCount(interp, objPtr->internalRep.listValue.ele[i]);
    Jim_Free(objPtr->internalRep.listValue.ele);
}

__device__ void DupListInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr)
{
    JIM_NOTUSED(interp);
    dupPtr->internalRep.listValue.len = srcPtr->internalRep.listValue.len;
    dupPtr->internalRep.listValue.maxLen = srcPtr->internalRep.listValue.maxLen;
    dupPtr->internalRep.listValue.ele = (Jim_Obj **)Jim_Alloc(sizeof(Jim_Obj *) * srcPtr->internalRep.listValue.maxLen);
    memcpy(dupPtr->internalRep.listValue.ele, srcPtr->internalRep.listValue.ele, sizeof(Jim_Obj *) * srcPtr->internalRep.listValue.len);
    for (int i = 0; i < dupPtr->internalRep.listValue.len; i++) Jim_IncrRefCount(dupPtr->internalRep.listValue.ele[i]);
    dupPtr->typePtr = &_listObjType;
}

// The following function checks whether a given string can be encoded into a list element with no quoting at all, by surrounding it with braces, or only by using backslash escapes.
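// Illustrative examples of the three cases: "abc" needs no quoting (SIMPLE), "a b" can be
// emitted as {a b} (BRACE), and "a{b" contains an unbalanced brace so it must be
// backslash-escaped (QUOTE).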
#define JIM_ELESTR_SIMPLE 0 #define JIM_ELESTR_BRACE 1 #define JIM_ELESTR_QUOTE 2 static __device__ unsigned char ListElementQuotingType(const char *s, int len) { int i, level, blevel, trySimple = 1; // Try with the SIMPLE case if (len == 0) return JIM_ELESTR_BRACE; if (s[0] == '"' || s[0] == '{') { trySimple = 0; goto testbrace; } for (i = 0; i < len; i++) switch (s[i]) { case ' ': case '$': case '"': case '[': case ']': case ';': case '\\': case '\r': case '\n': case '\t': case '\f': case '\v': trySimple = 0; case '{': case '}': goto testbrace; } return JIM_ELESTR_SIMPLE; testbrace: // Test if it's possible to do with braces if (s[len - 1] == '\\') return JIM_ELESTR_QUOTE; level = 0; blevel = 0; for (i = 0; i < len; i++) { switch (s[i]) { case '{': level++; break; case '}': level--; if (level < 0) return JIM_ELESTR_QUOTE; break; case '[': blevel++; break; case ']': blevel--; break; case '\\': if (s[i + 1] == '\n') return JIM_ELESTR_QUOTE; else if (s[i + 1] != '\0') i++; break; } } if (blevel < 0) return JIM_ELESTR_QUOTE; if (level == 0) { if (!trySimple) return JIM_ELESTR_BRACE; for (i = 0; i < len; i++) switch (s[i]) { case ' ': case '$': case '"': case '[': case ']': case ';': case '\\': case '\r': case '\n': case '\t': case '\f': case '\v': return JIM_ELESTR_BRACE; } return JIM_ELESTR_SIMPLE; } return JIM_ELESTR_QUOTE; } // Backslashes-escapes the null-terminated string 's' into the buffer at 'q' The buffer must be at least strlen(s) * 2 + 1 bytes long for the worst-case scenario. // Returns the length of the result. static __device__ int BackslashQuoteString(const char *s, int len, char *q) { char *p = q; while (len--) switch (*s) { case ' ': case '$': case '"': case '[': case ']': case '{': case '}': case ';': case '\\': *p++ = '\\'; *p++ = *s++; break; case '\n': *p++ = '\\'; *p++ = 'n'; s++; break; case '\r': *p++ = '\\'; *p++ = 'r'; s++; break; case '\t': *p++ = '\\'; *p++ = 't'; s++; break; case '\f': *p++ = '\\'; *p++ = 'f'; s++; break; case '\v': *p++ = '\\'; *p++ = 'v'; s++; break; default: *p++ = *s++; break; } *p = '\0'; return (int)(p - q); } static __device__ void JimMakeListStringRep(Jim_Obj *objPtr, Jim_Obj **objv, int objc) { #define STATIC_QUOTING_LEN 32 int i; const char *strRep; // Estimate the space needed unsigned char staticQuoting[STATIC_QUOTING_LEN]; unsigned char *quotingType = (objc > STATIC_QUOTING_LEN ? 
(unsigned char *)Jim_Alloc(objc) : staticQuoting); int bufLen = 0; for (i = 0; i < objc; i++) { int len; strRep = Jim_GetString(objv[i], &len); quotingType[i] = ListElementQuotingType(strRep, len); switch (quotingType[i]) { case JIM_ELESTR_SIMPLE: if (i != 0 || strRep[0] != '#') { bufLen += len; break; } // Special case '#' on first element needs braces quotingType[i] = JIM_ELESTR_BRACE; // fall through case JIM_ELESTR_BRACE: bufLen += len + 2; break; case JIM_ELESTR_QUOTE: bufLen += len * 2; break; } bufLen++; // elements separator } bufLen++; // Generate the string rep char *p = objPtr->bytes = (char *)Jim_Alloc(bufLen + 1); int realLength = 0; for (i = 0; i < objc; i++) { int len, qlen; strRep = Jim_GetString(objv[i], &len); switch (quotingType[i]) { case JIM_ELESTR_SIMPLE: memcpy(p, strRep, len); p += len; realLength += len; break; case JIM_ELESTR_BRACE: *p++ = '{'; memcpy(p, strRep, len); p += len; *p++ = '}'; realLength += len + 2; break; case JIM_ELESTR_QUOTE: if (i == 0 && strRep[0] == '#') { *p++ = '\\'; realLength++; } qlen = BackslashQuoteString(strRep, len, p); p += qlen; realLength += qlen; break; } // Add a separating space if (i + 1 != objc) { *p++ = ' '; realLength++; } } *p = '\0'; // nul term objPtr->length = realLength; if (quotingType != staticQuoting) Jim_Free(quotingType); } static __device__ void UpdateStringOfList(struct Jim_Obj *objPtr) { JimMakeListStringRep(objPtr, objPtr->internalRep.listValue.ele, objPtr->internalRep.listValue.len); } static __device__ int SetListFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr) { if (objPtr->typePtr == &_listObjType) return JIM_OK; // Optimise dict -> list for object with no string rep. Note that this may only save a little time, but it also preserves any source location of the dict elements which can be very useful if (Jim_IsDict(objPtr) && objPtr->bytes == NULL) { int len; Jim_Obj **listObjPtrPtr = JimDictPairs(objPtr, &len); for (int i = 0; i < len; i++) Jim_IncrRefCount(listObjPtrPtr[i]); // Now just switch the internal rep Jim_FreeIntRep(interp, objPtr); objPtr->typePtr = &_listObjType; objPtr->internalRep.listValue.len = len; objPtr->internalRep.listValue.maxLen = len; objPtr->internalRep.listValue.ele = listObjPtrPtr; return JIM_OK; } // Try to preserve information about filename / line number Jim_Obj *fileNameObj; int linenr; if (objPtr->typePtr == &_sourceObjType) { fileNameObj = objPtr->internalRep.sourceValue.fileNameObj; linenr = objPtr->internalRep.sourceValue.lineNumber; } else { fileNameObj = interp->emptyObj; linenr = 1; } Jim_IncrRefCount(fileNameObj); // Get the string representation int strLen; const char *str = Jim_GetString(objPtr, &strLen); // Free the old internal repr just now and initialize the new one just now. The string->list conversion can't fail. 
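    // (The conversion can't fail because JimParseList() below skips every token that is
    // not JIM_TT_STR or JIM_TT_ESC, so any input produces a, possibly empty, list.)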
Jim_FreeIntRep(interp, objPtr); objPtr->typePtr = &_listObjType; objPtr->internalRep.listValue.len = 0; objPtr->internalRep.listValue.maxLen = 0; objPtr->internalRep.listValue.ele = NULL; // Convert into a list if (strLen) { struct JimParserCtx parser; JimParserInit(&parser, str, strLen, linenr); while (!parser.eof) { JimParseList(&parser); if (parser.tt != JIM_TT_STR && parser.tt != JIM_TT_ESC) continue; Jim_Obj *elementPtr = JimParserGetTokenObj(interp, &parser); JimSetSourceInfo(interp, elementPtr, fileNameObj, parser.tline); ListAppendElement(objPtr, elementPtr); } } Jim_DecrRefCount(interp, fileNameObj); return JIM_OK; } __device__ Jim_Obj *Jim_NewListObj(Jim_Interp *interp, Jim_Obj *const *elements, int len) { Jim_Obj *objPtr = Jim_NewObj(interp); objPtr->typePtr = &_listObjType; objPtr->bytes = NULL; objPtr->internalRep.listValue.ele = NULL; objPtr->internalRep.listValue.len = 0; objPtr->internalRep.listValue.maxLen = 0; if (len) ListInsertElements(objPtr, 0, len, elements); return objPtr; } // Return a vector of Jim_Obj with the elements of a Jim list, and the length of the vector. Note that the user of this function should make // sure that the list object can't shimmer while the vector returned is in use, this vector is the one stored inside the internal representation // of the list object. This function is not exported, extensions should always access to the List object elements using Jim_ListIndex(). static __device__ void JimListGetElements(Jim_Interp *interp, Jim_Obj *listObj, int *listLen, Jim_Obj ***listVec) { *listLen = Jim_ListLength(interp, listObj); *listVec = listObj->internalRep.listValue.ele; } // Sorting uses ints, but commands may return wide static __device__ int JimSign(jim_wide w) { if (w == 0) return 0; else if (w < 0) return -1; return 1; } // ListSortElements type values struct lsort_info { jmp_buf jmpbuf; Jim_Obj *command; Jim_Interp *interp; enum { JIM_LSORT_ASCII, JIM_LSORT_NOCASE, JIM_LSORT_INTEGER, JIM_LSORT_REAL, JIM_LSORT_COMMAND } type; int order; int index; int indexed; int unique; int (*subfn)(Jim_Obj **, Jim_Obj **); }; static __device__ struct lsort_info *sort_info; static __device__ int ListSortIndexHelper(Jim_Obj **lhsObj, Jim_Obj **rhsObj) { Jim_Obj *lObj, *rObj; if (Jim_ListIndex(sort_info->interp, *lhsObj, sort_info->index, &lObj, JIM_ERRMSG) != JIM_OK || Jim_ListIndex(sort_info->interp, *rhsObj, sort_info->index, &rObj, JIM_ERRMSG) != JIM_OK) longjmp(sort_info->jmpbuf, JIM_ERROR); return sort_info->subfn(&lObj, &rObj); } // Sort the internal rep of a list. 
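// Each comparator below returns <0, 0 or >0 like strcmp(); the result is multiplied by
// sort_info->order (1 for an increasing sort, -1 for a decreasing one) to flip direction.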
static __device__ int ListSortString(Jim_Obj **lhsObj, Jim_Obj **rhsObj)
{
    return Jim_StringCompareObj(sort_info->interp, *lhsObj, *rhsObj, 0) * sort_info->order;
}

static __device__ int ListSortStringNoCase(Jim_Obj **lhsObj, Jim_Obj **rhsObj)
{
    return Jim_StringCompareObj(sort_info->interp, *lhsObj, *rhsObj, 1) * sort_info->order;
}

static __device__ int ListSortInteger(Jim_Obj **lhsObj, Jim_Obj **rhsObj)
{
    jim_wide lhs = 0, rhs = 0;
    if (Jim_GetWide(sort_info->interp, *lhsObj, &lhs) != JIM_OK || Jim_GetWide(sort_info->interp, *rhsObj, &rhs) != JIM_OK) longjmp(sort_info->jmpbuf, JIM_ERROR);
    return JimSign(lhs - rhs) * sort_info->order;
}

static __device__ int ListSortReal(Jim_Obj **lhsObj, Jim_Obj **rhsObj)
{
    double lhs = 0, rhs = 0;
    if (Jim_GetDouble(sort_info->interp, *lhsObj, &lhs) != JIM_OK || Jim_GetDouble(sort_info->interp, *rhsObj, &rhs) != JIM_OK) longjmp(sort_info->jmpbuf, JIM_ERROR);
    if (lhs == rhs) return 0;
    if (lhs > rhs) return sort_info->order;
    return -sort_info->order;
}

static __device__ int ListSortCommand(Jim_Obj **lhsObj, Jim_Obj **rhsObj)
{
    // This must be a valid list
    Jim_Obj *compare_script = Jim_DuplicateObj(sort_info->interp, sort_info->command);
    Jim_ListAppendElement(sort_info->interp, compare_script, *lhsObj);
    Jim_ListAppendElement(sort_info->interp, compare_script, *rhsObj);
    int rc = Jim_EvalObj(sort_info->interp, compare_script);
    jim_wide ret = 0;
    if (rc != JIM_OK || Jim_GetWide(sort_info->interp, Jim_GetResult(sort_info->interp), &ret) != JIM_OK) longjmp(sort_info->jmpbuf, rc);
    return JimSign(ret) * sort_info->order;
}

// Remove duplicate elements from the (sorted) list in-place, according to the comparison function, comp.
// Note that the last unique value is kept, not the first
static __device__ void ListRemoveDuplicates(Jim_Obj *listObjPtr, int (*comp)(Jim_Obj **lhs, Jim_Obj **rhs))
{
    int src;
    int dst = 0;
    Jim_Obj **ele = listObjPtr->internalRep.listValue.ele;
    for (src = 1; src < listObjPtr->internalRep.listValue.len; src++) {
        // Match, so replace the dest with the current source
        if (comp(&ele[dst], &ele[src]) == 0) Jim_DecrRefCount(sort_info->interp, ele[dst]);
        // No match, so keep the current source and move to the next destination
        else dst++;
        ele[dst] = ele[src];
    }
    // At end of list, keep the final element: ele[dst] already holds it, so only the count advances
    dst++;
    // Set the new length
    listObjPtr->internalRep.listValue.len = dst;
}

// Sort a list *in place*. MUST be called with a non-shared list.
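// For example, [lsort -command cmp $list] may evaluate scripts that invoke lsort again,
// which is why ListSortElements() below saves the global sort_info into prev_info and
// restores it before returning.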
static __device__ int ListSortElements(Jim_Interp *interp, Jim_Obj *listObjPtr, struct lsort_info *info) { JimPanic(Jim_IsShared(listObjPtr), "ListSortElements called with shared object"); SetListFromAny(interp, listObjPtr); // Allow lsort to be called reentrantly struct lsort_info *prev_info = sort_info; sort_info = info; Jim_Obj **vector = listObjPtr->internalRep.listValue.ele; int len = listObjPtr->internalRep.listValue.len; typedef int (qsort_comparator)(const void*,const void*); int (*fn)(Jim_Obj**,Jim_Obj**); switch (info->type) { case lsort_info::JIM_LSORT_ASCII: fn = ListSortString; break; case lsort_info::JIM_LSORT_NOCASE: fn = ListSortStringNoCase; break; case lsort_info::JIM_LSORT_INTEGER: fn = ListSortInteger; break; case lsort_info::JIM_LSORT_REAL: fn = ListSortReal; break; case lsort_info::JIM_LSORT_COMMAND: fn = ListSortCommand; break; default: fn = NULL; // avoid warning JimPanic(1, "ListSort called with invalid sort type"); } if (info->indexed) { // Need to interpose a "list index" function info->subfn = fn; fn = ListSortIndexHelper; } int rc; if ((rc = setjmp(info->jmpbuf)) == 0) { qsort(vector, len, sizeof(Jim_Obj *), (qsort_comparator *)fn); if (info->unique && len > 1) ListRemoveDuplicates(listObjPtr, fn); Jim_InvalidateStringRep(listObjPtr); } sort_info = prev_info; return rc; } // This is the low-level function to insert elements into a list. The higher-level Jim_ListInsertElements() performs shared object // check and invalidates the string repr. This version is used in the internals of the List Object and is not exported. // NOTE: this function can be called only against objects with internal type of List. // An insertion point (idx) of -1 means end-of-list. static __device__ void ListInsertElements(Jim_Obj *listPtr, int idx, int elemc, Jim_Obj *const *elemVec) { int currentLen = listPtr->internalRep.listValue.len; int requiredLen = currentLen + elemc; if (requiredLen > listPtr->internalRep.listValue.maxLen) { // Don't do allocations of under 4 pointers. if (requiredLen < 2) requiredLen = 4; else requiredLen *= 2; listPtr->internalRep.listValue.ele = (Jim_Obj **)Jim_Realloc(listPtr->internalRep.listValue.ele, sizeof(Jim_Obj *) * requiredLen); listPtr->internalRep.listValue.maxLen = requiredLen; } if (idx < 0) idx = currentLen; Jim_Obj **point = listPtr->internalRep.listValue.ele + idx; memmove(point + elemc, point, (currentLen - idx) * sizeof(Jim_Obj *)); for (int i = 0; i < elemc; ++i) { point[i] = elemVec[i]; Jim_IncrRefCount(point[i]); } listPtr->internalRep.listValue.len += elemc; } // Convenience call to ListInsertElements() to append a single element. static __device__ void ListAppendElement(Jim_Obj *listPtr, Jim_Obj *objPtr) { ListInsertElements(listPtr, -1, 1, &objPtr); } // Appends every element of appendListPtr into listPtr. Both have to be of the list type. 
// Convenience call to ListInsertElements().
static __device__ void ListAppendList(Jim_Obj *listPtr, Jim_Obj *appendListPtr)
{
    ListInsertElements(listPtr, -1, appendListPtr->internalRep.listValue.len, appendListPtr->internalRep.listValue.ele);
}

__device__ void Jim_ListAppendElement(Jim_Interp *interp, Jim_Obj *listPtr, Jim_Obj *objPtr)
{
    JimPanic(Jim_IsShared(listPtr), "Jim_ListAppendElement called with shared object");
    SetListFromAny(interp, listPtr);
    Jim_InvalidateStringRep(listPtr);
    ListAppendElement(listPtr, objPtr);
}

__device__ void Jim_ListAppendList(Jim_Interp *interp, Jim_Obj *listPtr, Jim_Obj *appendListPtr)
{
    JimPanic(Jim_IsShared(listPtr), "Jim_ListAppendList called with shared object");
    SetListFromAny(interp, listPtr);
    SetListFromAny(interp, appendListPtr);
    Jim_InvalidateStringRep(listPtr);
    ListAppendList(listPtr, appendListPtr);
}

__device__ int Jim_ListLength(Jim_Interp *interp, Jim_Obj *objPtr)
{
    SetListFromAny(interp, objPtr);
    return objPtr->internalRep.listValue.len;
}

__device__ void Jim_ListInsertElements(Jim_Interp *interp, Jim_Obj *listPtr, int idx, int objc, Jim_Obj *const *objVec)
{
    JimPanic(Jim_IsShared(listPtr), "Jim_ListInsertElements called with shared object");
    SetListFromAny(interp, listPtr);
    if (idx >= 0 && idx > listPtr->internalRep.listValue.len) idx = listPtr->internalRep.listValue.len;
    else if (idx < 0) idx = 0;
    Jim_InvalidateStringRep(listPtr);
    ListInsertElements(listPtr, idx, objc, objVec);
}

__device__ Jim_Obj *Jim_ListGetIndex(Jim_Interp *interp, Jim_Obj *listPtr, int idx)
{
    SetListFromAny(interp, listPtr);
    if ((idx >= 0 && idx >= listPtr->internalRep.listValue.len) || (idx < 0 && (-idx - 1) >= listPtr->internalRep.listValue.len)) return NULL;
    if (idx < 0) idx = listPtr->internalRep.listValue.len + idx;
    return listPtr->internalRep.listValue.ele[idx];
}

__device__ int Jim_ListIndex(Jim_Interp *interp, Jim_Obj *listPtr, int idx, Jim_Obj **objPtrPtr, int flags)
{
    *objPtrPtr = Jim_ListGetIndex(interp, listPtr, idx);
    if (*objPtrPtr == NULL) {
        if (flags & JIM_ERRMSG) Jim_SetResultString(interp, "list index out of range", -1);
        return JIM_ERROR;
    }
    return JIM_OK;
}

static __device__ int ListSetIndex(Jim_Interp *interp, Jim_Obj *listPtr, int idx, Jim_Obj *newObjPtr, int flags)
{
    SetListFromAny(interp, listPtr);
    if ((idx >= 0 && idx >= listPtr->internalRep.listValue.len) || (idx < 0 && (-idx - 1) >= listPtr->internalRep.listValue.len)) {
        if (flags & JIM_ERRMSG) Jim_SetResultString(interp, "list index out of range", -1);
        return JIM_ERROR;
    }
    if (idx < 0) idx = listPtr->internalRep.listValue.len + idx;
    Jim_DecrRefCount(interp, listPtr->internalRep.listValue.ele[idx]);
    listPtr->internalRep.listValue.ele[idx] = newObjPtr;
    Jim_IncrRefCount(newObjPtr);
    return JIM_OK;
}

// Modify the list stored in the variable named 'varNamePtr', setting the element specified by the 'indexc' index objects in 'indexv' to the new element 'newObjPtr'
// (implements the [lset] command).
__device__ int Jim_ListSetIndex(Jim_Interp *interp, Jim_Obj *varNamePtr, Jim_Obj *const *indexv, int indexc, Jim_Obj *newObjPtr)
{
    Jim_Obj *varObjPtr, *objPtr;
    varObjPtr = objPtr = Jim_GetVariable(interp, varNamePtr, JIM_ERRMSG | JIM_UNSHARED);
    if (objPtr == NULL) return JIM_ERROR;
    int shared;
    if ((shared = Jim_IsShared(objPtr))) varObjPtr = objPtr = Jim_DuplicateObj(interp, objPtr);
    int idx;
    for (int i = 0; i < indexc - 1; i++) {
        Jim_Obj *listObjPtr = objPtr;
        if (Jim_GetIndex(interp, indexv[i], &idx) != JIM_OK) goto err;
        if (Jim_ListIndex(interp, listObjPtr, idx, &objPtr, JIM_ERRMSG) != JIM_OK) goto err;
        if (Jim_IsShared(objPtr)) {
            objPtr = Jim_DuplicateObj(interp, objPtr);
            ListSetIndex(interp, listObjPtr, idx, objPtr, JIM_NONE);
        }
        Jim_InvalidateStringRep(listObjPtr);
    }
    if (Jim_GetIndex(interp, indexv[indexc - 1], &idx) != JIM_OK) goto err;
    if (ListSetIndex(interp, objPtr, idx, newObjPtr, JIM_ERRMSG) == JIM_ERROR) goto err;
    Jim_InvalidateStringRep(objPtr);
    Jim_InvalidateStringRep(varObjPtr);
    if (Jim_SetVariable(interp, varNamePtr, varObjPtr) != JIM_OK) goto err;
    Jim_SetResult(interp, varObjPtr);
    return JIM_OK;
err:
    if (shared) Jim_FreeNewObj(interp, varObjPtr);
    return JIM_ERROR;
}

__device__ Jim_Obj *Jim_ListJoin(Jim_Interp *interp, Jim_Obj *listObjPtr, const char *joinStr, int joinStrLen)
{
    int listLen = Jim_ListLength(interp, listObjPtr);
    Jim_Obj *resObjPtr = Jim_NewEmptyStringObj(interp);
    for (int i = 0; i < listLen;) {
        Jim_AppendObj(interp, resObjPtr, Jim_ListGetIndex(interp, listObjPtr, i));
        if (++i != listLen) Jim_AppendString(interp, resObjPtr, joinStr, joinStrLen);
    }
    return resObjPtr;
}

__device__ Jim_Obj *Jim_ConcatObj(Jim_Interp *interp, int objc, Jim_Obj *const *objv)
{
    int i;
    // If all the objects in objv are lists, it's possible to return a list as the result: the concatenation of all the lists.
    for (i = 0; i < objc; i++)
        if (!Jim_IsList(objv[i])) break;
    if (i == objc) {
        Jim_Obj *objPtr = Jim_NewListObj(interp, NULL, 0);
        for (i = 0; i < objc; i++) ListAppendList(objPtr, objv[i]);
        return objPtr;
    }
    else {
        // Else... we have to glue strings together
        int len = 0, objLen;
        // Compute the length
        for (i = 0; i < objc; i++) len += Jim_Length(objv[i]);
        if (objc) len += objc - 1;
        // Create the string rep, and a string object holding it.
        char *bytes, *p;
        p = bytes = (char *)Jim_Alloc(len + 1);
        for (i = 0; i < objc; i++) {
            const char *s = Jim_GetString(objv[i], &objLen);
            // Remove leading space
            while (objLen && isspace(*s)) { s++; objLen--; len--; }
            // And trailing space, but handle the trailing backslash-space case
            while (objLen && isspace(s[objLen - 1])) {
                if (objLen > 1 && s[objLen - 2] == '\\') break;
                objLen--; len--;
            }
            memcpy(p, s, objLen);
            p += objLen;
            if (i + 1 != objc) {
                if (objLen) *p++ = ' ';
                // Drop the space calculated for this element that is instead null.
                else len--;
            }
        }
        *p = '\0';
        return Jim_NewStringObjNoAlloc(interp, bytes, len);
    }
}

// Returns a list composed of the elements in the specified range. first and last are accepted directly as Jim_Objects and processed for the end?-index? case.
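// Illustrative example: for a 5-element list, "end-1" parses to the index value -2 (see
// SetIndexFromAny() later in this file) and JimRelToAbsIndex() maps it to 3, so
// [lrange $l 1 end-1] copies elements 1 through 3.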
__device__ Jim_Obj *Jim_ListRange(Jim_Interp *interp, Jim_Obj *listObjPtr, Jim_Obj *firstObjPtr, Jim_Obj *lastObjPtr)
{
    int first, last;
    if (Jim_GetIndex(interp, firstObjPtr, &first) != JIM_OK || Jim_GetIndex(interp, lastObjPtr, &last) != JIM_OK) return NULL;
    int len = Jim_ListLength(interp, listObjPtr); // will convert into list
    first = JimRelToAbsIndex(len, first);
    last = JimRelToAbsIndex(len, last);
    int rangeLen;
    JimRelToAbsRange(len, &first, &last, &rangeLen);
    if (first == 0 && last == len) return listObjPtr;
    return Jim_NewListObj(interp, listObjPtr->internalRep.listValue.ele + first, rangeLen);
}

#pragma endregion

// -----------------------------------------------------------------------------
// Dict object
// -----------------------------------------------------------------------------
#pragma region Dict object

static __device__ void FreeDictInternalRep(Jim_Interp *interp, Jim_Obj *objPtr);
static __device__ void DupDictInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr);
static __device__ void UpdateStringOfDict(struct Jim_Obj *objPtr);
static __device__ int SetDictFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr);

// Dict HashTable Type.
// Keys and Values are Jim objects.
static __device__ unsigned int JimObjectHTHashFunction(const void *key)
{
    int len;
    const char *str = Jim_GetString((Jim_Obj *)key, &len);
    return Jim_GenHashFunction((const unsigned char *)str, len);
}

static __device__ int JimObjectHTKeyCompare(void *privdata, const void *key1, const void *key2)
{
    return Jim_StringEqObj((Jim_Obj *)key1, (Jim_Obj *)key2);
}

static __device__ void *JimObjectHTKeyValDup(void *privdata, const void *val)
{
    Jim_IncrRefCount((Jim_Obj *)val);
    return (void *)val;
}

static __device__ void JimObjectHTKeyValDestructor(void *interp, void *val)
{
    Jim_DecrRefCount((Jim_Interp *)interp, (Jim_Obj *)val);
}

__constant__ static const Jim_HashTableType JimDictHashTableType = {
    JimObjectHTHashFunction, // hash function
    JimObjectHTKeyValDup, // key dup
    JimObjectHTKeyValDup, // val dup
    JimObjectHTKeyCompare, // key compare
    JimObjectHTKeyValDestructor, // key destructor
    JimObjectHTKeyValDestructor // val destructor
};

// Note that while the elements of the dict may contain references, the dict object itself can't. This basically means that the
// dict object string representation as a whole can't contain references that are not present in the single elements.
__constant__ static const Jim_ObjType _dictObjType = {
    "dict",
    FreeDictInternalRep,
    DupDictInternalRep,
    UpdateStringOfDict,
    JIM_TYPE_NONE,
};

__device__ void FreeDictInternalRep(Jim_Interp *interp, Jim_Obj *objPtr)
{
    JIM_NOTUSED(interp);
    Jim_FreeHashTable((Jim_HashTable *)objPtr->internalRep.ptr);
    Jim_Free(objPtr->internalRep.ptr);
}

__device__ void DupDictInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr)
{
    // Create a new hash table
    Jim_HashTable *ht = (Jim_HashTable *)srcPtr->internalRep.ptr;
    Jim_HashTable *dupHt = (Jim_HashTable *)Jim_Alloc(sizeof(*dupHt));
    Jim_InitHashTable(dupHt, &JimDictHashTableType, interp);
    if (ht->size != 0) Jim_ExpandHashTable(dupHt, ht->size);
    // Copy every element from the source to the dup hash table
    Jim_HashTableIterator htiter;
    Jim_HashEntry *he;
    JimInitHashTableIterator(ht, &htiter);
    while ((he = Jim_NextHashEntry(&htiter)) != NULL) Jim_AddHashEntry(dupHt, he->key, he->u.val);
    dupPtr->internalRep.ptr = dupHt;
    dupPtr->typePtr = &_dictObjType;
}

static __device__ Jim_Obj **JimDictPairs(Jim_Obj *dictPtr, int *len)
{
    Jim_HashTable *ht = (Jim_HashTable *)dictPtr->internalRep.ptr;
    // Turn the hash table into a flat vector of Jim_Objects.
    Jim_Obj **objv = (Jim_Obj **)Jim_Alloc((ht->used * 2) * sizeof(Jim_Obj *));
    Jim_HashTableIterator htiter;
    Jim_HashEntry *he;
    JimInitHashTableIterator(ht, &htiter);
    int i = 0;
    while ((he = Jim_NextHashEntry(&htiter)) != NULL) {
        objv[i++] = (Jim_Obj *)Jim_GetHashEntryKey(he);
        objv[i++] = (Jim_Obj *)Jim_GetHashEntryVal(he);
    }
    *len = i;
    return objv;
}

static __device__ void UpdateStringOfDict(struct Jim_Obj *objPtr)
{
    // Turn the hash table into a flat vector of Jim_Objects.
    int len;
    Jim_Obj **objv = JimDictPairs(objPtr, &len);
    // And now generate the string rep as a list
    JimMakeListStringRep(objPtr, objv, len);
    Jim_Free(objv);
}

static __device__ int SetDictFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr)
{
    if (objPtr->typePtr == &_dictObjType) return JIM_OK;
    // A shared list, so get the string representation now to avoid changing the order in case of fast conversion to dict.
    if (Jim_IsList(objPtr) && Jim_IsShared(objPtr)) Jim_String(objPtr);
    // For simplicity, convert a non-list object to a list and then to a dict
    int listlen = Jim_ListLength(interp, objPtr);
    if (listlen % 2) {
        Jim_SetResultString(interp, "missing value to go with key", -1);
        return JIM_ERROR;
    }
    else {
        // Converting from a list to a dict can't fail
        Jim_HashTable *ht = (Jim_HashTable *)Jim_Alloc(sizeof(*ht));
        Jim_InitHashTable(ht, &JimDictHashTableType, interp);
        for (int i = 0; i < listlen; i += 2) {
            Jim_Obj *keyObjPtr = Jim_ListGetIndex(interp, objPtr, i);
            Jim_Obj *valObjPtr = Jim_ListGetIndex(interp, objPtr, i + 1);
            Jim_ReplaceHashEntry(ht, keyObjPtr, valObjPtr);
        }
        Jim_FreeIntRep(interp, objPtr);
        objPtr->typePtr = &_dictObjType;
        objPtr->internalRep.ptr = ht;
        return JIM_OK;
    }
}

// Dict object API
// Add an element to a dict. objPtr must be of the "dict" type. The higher-level exported function is Jim_DictAddElement().
// If an element with the specified key already exists, the associated value is replaced with the new one.
// If valueObjPtr == NULL, the key is instead removed if it exists.
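// For example, DictAddElement(interp, objPtr, keyObjPtr, NULL) gives the [dict unset]
// behaviour, while a non-NULL value replaces any existing entry via Jim_ReplaceHashEntry().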
static __device__ int DictAddElement(Jim_Interp *interp, Jim_Obj *objPtr, Jim_Obj *keyObjPtr, Jim_Obj *valueObjPtr) { Jim_HashTable *ht = (Jim_HashTable *)objPtr->internalRep.ptr; if (valueObjPtr == NULL) // unset return Jim_DeleteHashEntry(ht, keyObjPtr); Jim_ReplaceHashEntry(ht, keyObjPtr, valueObjPtr); return JIM_OK; } // Add an element, higher-level interface for DictAddElement(). // If valueObjPtr == NULL, the key is removed if it exists. __device__ int Jim_DictAddElement(Jim_Interp *interp, Jim_Obj *objPtr, Jim_Obj *keyObjPtr, Jim_Obj *valueObjPtr) { JimPanic(Jim_IsShared(objPtr), "Jim_DictAddElement called with shared object"); if (SetDictFromAny(interp, objPtr) != JIM_OK) return JIM_ERROR; Jim_InvalidateStringRep(objPtr); return DictAddElement(interp, objPtr, keyObjPtr, valueObjPtr); } __device__ Jim_Obj *Jim_NewDictObj(Jim_Interp *interp, Jim_Obj *const *elements, int len) { JimPanic(len % 2, "Jim_NewDictObj() 'len' argument must be even"); Jim_Obj *objPtr = Jim_NewObj(interp); objPtr->typePtr = &_dictObjType; objPtr->bytes = NULL; objPtr->internalRep.ptr = Jim_Alloc(sizeof(Jim_HashTable)); Jim_InitHashTable((Jim_HashTable *)objPtr->internalRep.ptr, &JimDictHashTableType, interp); for (int i = 0; i < len; i += 2) DictAddElement(interp, objPtr, elements[i], elements[i + 1]); return objPtr; } // Return the value associated to the specified dict key. Returns JIM_OK if OK, JIM_ERROR if entry not found or -1 if can't create dict value // Sets *objPtrPtr to non-NULL only upon success. __device__ int Jim_DictKey(Jim_Interp *interp, Jim_Obj *dictPtr, Jim_Obj *keyPtr, Jim_Obj **objPtrPtr, int flags) { if (SetDictFromAny(interp, dictPtr) != JIM_OK) return -1; Jim_HashTable *ht = (Jim_HashTable *)dictPtr->internalRep.ptr; Jim_HashEntry *he; if ((he = Jim_FindHashEntry(ht, keyPtr)) == NULL) { if (flags & JIM_ERRMSG) Jim_SetResultFormatted(interp, "key \"%#s\" not known in dictionary", keyPtr); return JIM_ERROR; } *objPtrPtr = (Jim_Obj *)he->u.val; return JIM_OK; } // Return an allocated array of key/value pairs for the dictionary. Stores the length in *len __device__ int Jim_DictPairs(Jim_Interp *interp, Jim_Obj *dictPtr, Jim_Obj ***objPtrPtr, int *len) { if (SetDictFromAny(interp, dictPtr) != JIM_OK) return JIM_ERROR; *objPtrPtr = JimDictPairs(dictPtr, len); return JIM_OK; } // Return the value associated to the specified dict keys __device__ int Jim_DictKeysVector(Jim_Interp *interp, Jim_Obj *dictPtr, Jim_Obj *const *keyv, int keyc, Jim_Obj **objPtrPtr, int flags) { if (keyc == 0) { *objPtrPtr = dictPtr; return JIM_OK; } for (int i = 0; i < keyc; i++) { Jim_Obj *objPtr; int rc = Jim_DictKey(interp, dictPtr, keyv[i], &objPtr, flags); if (rc != JIM_OK) return rc; dictPtr = objPtr; } *objPtrPtr = dictPtr; return JIM_OK; } // Modify the dict stored into the variable named 'varNamePtr' setting the element specified by the 'keyc' keys objects in 'keyv', with the new value of the element 'newObjPtr'. // If newObjPtr == NULL the operation is to remove the given key from the dictionary. // If flags & JIM_ERRMSG, then failure to remove the key is considered an error and JIM_ERROR is returned. Otherwise it is ignored and JIM_OK is returned. 
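// Illustrative example: [dict set d a b 5] reaches this function with keyv = {a, b}; the
// loop below creates an empty dict for the missing intermediate key "a", and only the
// final key "b" receives the new value.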
__device__ int Jim_SetDictKeysVector(Jim_Interp *interp, Jim_Obj *varNamePtr, Jim_Obj *const *keyv, int keyc, Jim_Obj *newObjPtr, int flags) { Jim_Obj *varObjPtr, *objPtr; varObjPtr = objPtr = Jim_GetVariable(interp, varNamePtr, flags); if (objPtr == NULL) { // Cannot remove a key from non existing var if (newObjPtr == NULL && (flags & JIM_MUSTEXIST)) return JIM_ERROR; varObjPtr = objPtr = Jim_NewDictObj(interp, NULL, 0); if (Jim_SetVariable(interp, varNamePtr, objPtr) != JIM_OK) { Jim_FreeNewObj(interp, varObjPtr); return JIM_ERROR; } } int shared; if ((shared = Jim_IsShared(objPtr))) varObjPtr = objPtr = Jim_DuplicateObj(interp, objPtr); Jim_Obj *dictObjPtr; for (int i = 0; i < keyc; i++) { dictObjPtr = objPtr; // Check if it's a valid dictionary if (SetDictFromAny(interp, dictObjPtr) != JIM_OK) goto err; if (i == keyc - 1) { // Last key: Note that error on unset with missing last key is OK if (Jim_DictAddElement(interp, objPtr, keyv[keyc - 1], newObjPtr) != JIM_OK) if (newObjPtr || (flags & JIM_MUSTEXIST)) goto err; break; } // Check if the given key exists Jim_InvalidateStringRep(dictObjPtr); if (Jim_DictKey(interp, dictObjPtr, keyv[i], &objPtr, newObjPtr ? JIM_NONE : JIM_ERRMSG) == JIM_OK) { // This key exists at the current level. Make sure it's not shared! if (Jim_IsShared(objPtr)) { objPtr = Jim_DuplicateObj(interp, objPtr); DictAddElement(interp, dictObjPtr, keyv[i], objPtr); } } else { // Key not found. If it's an [unset] operation this is an error. Only the last key may not exist. if (newObjPtr == NULL) goto err; // Otherwise set an empty dictionary as key's value objPtr = Jim_NewDictObj(interp, NULL, 0); DictAddElement(interp, dictObjPtr, keyv[i], objPtr); } } // XXX: Is this necessary? Jim_InvalidateStringRep(objPtr); Jim_InvalidateStringRep(varObjPtr); if (Jim_SetVariable(interp, varNamePtr, varObjPtr) != JIM_OK) goto err; Jim_SetResult(interp, varObjPtr); return JIM_OK; err: if (shared) Jim_FreeNewObj(interp, varObjPtr); return JIM_ERROR; } #pragma endregion // ----------------------------------------------------------------------------- // Index object // ----------------------------------------------------------------------------- #pragma region Index object static __device__ void UpdateStringOfIndex(struct Jim_Obj *objPtr); static __device__ int SetIndexFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr); __constant__ static const Jim_ObjType _indexObjType = { "index", NULL, NULL, UpdateStringOfIndex, JIM_TYPE_NONE, }; static __device__ void UpdateStringOfIndex(struct Jim_Obj *objPtr) { if (objPtr->internalRep.intValue == -1) JimSetStringBytes(objPtr, "end"); else { char buf[JIM_INTEGER_SPACE + 1]; if (objPtr->internalRep.intValue >= 0) sprintf(buf, "%d", objPtr->internalRep.intValue); // Must be <= -2 else sprintf(buf, "end%d", objPtr->internalRep.intValue + 1); JimSetStringBytes(objPtr, buf); } } static __device__ int SetIndexFromAny(Jim_Interp *interp, Jim_Obj *objPtr) { // Get the string representation const char *str = Jim_String(objPtr); // Try to convert into an index int idx, end = 0; char *endptr; if (!strncmp(str, "end", 3)) { end = 1; str += 3; idx = 0; } else { idx = jim_strtol(str, &endptr); if (endptr == str) goto badindex; str = endptr; } // Now str may include or +<num> or -<num> if (*str == '+' || *str == '-') { int sign = (*str == '+' ? 
1 : -1); idx += sign * jim_strtol(++str, &endptr); if (str == endptr || *endptr) goto badindex; str = endptr; } // The only thing left should be spaces while (isspace(*str)) { str++; } if (*str) goto badindex; // end-1 is represented as -2 if (end) idx = (idx > 0 ? INT_MAX : idx - 1); else if (idx < 0) idx = -INT_MAX; // Free the old internal repr and set the new one Jim_FreeIntRep(interp, objPtr); objPtr->typePtr = &_indexObjType; objPtr->internalRep.intValue = idx; return JIM_OK; badindex: Jim_SetResultFormatted(interp, "bad index \"%#s\": must be integer?[+-]integer? or end?[+-]integer?", objPtr); return JIM_ERROR; } __device__ int Jim_GetIndex(Jim_Interp *interp, Jim_Obj *objPtr, int *indexPtr) { // Avoid shimmering if the object is an integer if (objPtr->typePtr == &_intObjType) { jim_wide val = JimWideValue(objPtr); if (val < 0) *indexPtr = -INT_MAX; else if (val > INT_MAX) *indexPtr = INT_MAX; else *indexPtr = (int)val; return JIM_OK; } if (objPtr->typePtr != &_indexObjType && SetIndexFromAny(interp, objPtr) == JIM_ERROR) return JIM_ERROR; *indexPtr = objPtr->internalRep.intValue; return JIM_OK; } #pragma endregion // ----------------------------------------------------------------------------- // Return Code Object // ----------------------------------------------------------------------------- #pragma region Return Code Object // NOTE: These must be kept in the same order as JIM_OK, JIM_ERROR, ... __constant__ static const char * const jimReturnCodes[] = { "ok", "error", "return", "break", "continue", "signal", "exit", "eval", NULL }; #define jimReturnCodesSize (sizeof(jimReturnCodes)/sizeof(*jimReturnCodes)) __constant__ static const Jim_ObjType _returnCodeObjType = { "return-code", NULL, NULL, NULL, JIM_TYPE_NONE, }; // Converts a (standard) return code to a string. Returns "?" for non-standard return codes. __device__ const char *Jim_ReturnCode(int code) { return (code < 0 || code >= (int)jimReturnCodesSize ? "?" 
: jimReturnCodes[code]); } static __device__ int SetReturnCodeFromAny(Jim_Interp *interp, Jim_Obj *objPtr) { // Try to convert into an integer int returnCode; jim_wide wideValue; if (JimGetWideNoErr(interp, objPtr, &wideValue) != JIM_ERROR) returnCode = (int)wideValue; else if (Jim_GetEnum(interp, objPtr, jimReturnCodes, &returnCode, NULL, JIM_NONE) != JIM_OK) { Jim_SetResultFormatted(interp, "expected return code but got \"%#s\"", objPtr); return JIM_ERROR; } // Free the old internal repr and set the new one Jim_FreeIntRep(interp, objPtr); objPtr->typePtr = &_returnCodeObjType; objPtr->internalRep.intValue = returnCode; return JIM_OK; } __device__ int Jim_GetReturnCode(Jim_Interp *interp, Jim_Obj *objPtr, int *intPtr) { if (objPtr->typePtr != &_returnCodeObjType && SetReturnCodeFromAny(interp, objPtr) == JIM_ERROR) return JIM_ERROR; *intPtr = objPtr->internalRep.intValue; return JIM_OK; } #pragma endregion // ----------------------------------------------------------------------------- // Expression Parsing // ----------------------------------------------------------------------------- #pragma region Expression Parsing static __device__ int JimParseExprOperator(struct JimParserCtx *pc); static __device__ int JimParseExprNumber(struct JimParserCtx *pc); static __device__ int JimParseExprIrrational(struct JimParserCtx *pc); // Expr's stack machine operator opcodes // Binary operators (numbers) enum { // Continues on from the JIM_TT_ space // Operations JIM_EXPROP_MUL = JIM_TT_EXPR_OP,// 20 JIM_EXPROP_DIV, JIM_EXPROP_MOD, JIM_EXPROP_SUB, JIM_EXPROP_ADD, JIM_EXPROP_LSHIFT, JIM_EXPROP_RSHIFT, JIM_EXPROP_ROTL, JIM_EXPROP_ROTR, JIM_EXPROP_LT, JIM_EXPROP_GT, JIM_EXPROP_LTE, JIM_EXPROP_GTE, JIM_EXPROP_NUMEQ, JIM_EXPROP_NUMNE, JIM_EXPROP_BITAND, // 35 JIM_EXPROP_BITXOR, JIM_EXPROP_BITOR, // Note must keep these together JIM_EXPROP_LOGICAND, // 38 JIM_EXPROP_LOGICAND_LEFT, JIM_EXPROP_LOGICAND_RIGHT, // and these JIM_EXPROP_LOGICOR, // 41 JIM_EXPROP_LOGICOR_LEFT, JIM_EXPROP_LOGICOR_RIGHT, // and these // Ternary operators JIM_EXPROP_TERNARY, // 44 JIM_EXPROP_TERNARY_LEFT, JIM_EXPROP_TERNARY_RIGHT, // and these JIM_EXPROP_COLON, // 47 JIM_EXPROP_COLON_LEFT, JIM_EXPROP_COLON_RIGHT, JIM_EXPROP_POW, // 50 // Binary operators (strings) JIM_EXPROP_STREQ, // 51 JIM_EXPROP_STRNE, JIM_EXPROP_STRIN, JIM_EXPROP_STRNI, // Unary operators (numbers) JIM_EXPROP_NOT, // 55 JIM_EXPROP_BITNOT, JIM_EXPROP_UNARYMINUS, JIM_EXPROP_UNARYPLUS, // Functions JIM_EXPROP_FUNC_FIRST, // 59 JIM_EXPROP_FUNC_INT = JIM_EXPROP_FUNC_FIRST, JIM_EXPROP_FUNC_WIDE, JIM_EXPROP_FUNC_ABS, JIM_EXPROP_FUNC_DOUBLE, JIM_EXPROP_FUNC_ROUND, JIM_EXPROP_FUNC_RAND, JIM_EXPROP_FUNC_SRAND, // math functions from libm JIM_EXPROP_FUNC_SIN, // 65 JIM_EXPROP_FUNC_COS, JIM_EXPROP_FUNC_TAN, JIM_EXPROP_FUNC_ASIN, JIM_EXPROP_FUNC_ACOS, JIM_EXPROP_FUNC_ATAN, JIM_EXPROP_FUNC_SINH, JIM_EXPROP_FUNC_COSH, JIM_EXPROP_FUNC_TANH, JIM_EXPROP_FUNC_CEIL, JIM_EXPROP_FUNC_FLOOR, JIM_EXPROP_FUNC_EXP, JIM_EXPROP_FUNC_LOG, JIM_EXPROP_FUNC_LOG10, JIM_EXPROP_FUNC_SQRT, JIM_EXPROP_FUNC_POW, }; struct JimExprState { Jim_Obj **stack; int stacklen; int opcode; int skip; }; // Operators table typedef struct Jim_ExprOperator { const char *name; int (*funcop)(Jim_Interp*interp,struct JimExprState*e); unsigned char precedence; unsigned char arity; unsigned char lazy; unsigned char namelen; } Jim_ExprOperator; static __device__ void ExprPush(struct JimExprState *e, Jim_Obj *obj) { Jim_IncrRefCount(obj); e->stack[e->stacklen++] = obj; } static __device__ Jim_Obj *ExprPop(struct 
JimExprState *e) { return e->stack[--e->stacklen]; } static __device__ int JimExprOpNumUnary(Jim_Interp *interp, struct JimExprState *e) { int intresult = 1; int rc = JIM_OK; Jim_Obj *A = ExprPop(e); double dA, dC = 0; jim_wide wA, wC = 0; if ((A->typePtr != &_doubleObjType || A->bytes) && JimGetWideNoErr(interp, A, &wA) == JIM_OK) switch (e->opcode) { case JIM_EXPROP_FUNC_INT: case JIM_EXPROP_FUNC_WIDE: case JIM_EXPROP_FUNC_ROUND: case JIM_EXPROP_UNARYPLUS: wC = wA; break; case JIM_EXPROP_FUNC_DOUBLE: dC = (double)wA; intresult = 0; break; case JIM_EXPROP_FUNC_ABS: wC = (wA >= 0 ? wA : -wA); break; case JIM_EXPROP_UNARYMINUS: wC = -wA; break; case JIM_EXPROP_NOT: wC = !wA; break; default: abort(); } else if ((rc = Jim_GetDouble(interp, A, &dA)) == JIM_OK) switch (e->opcode) { case JIM_EXPROP_FUNC_INT: case JIM_EXPROP_FUNC_WIDE: wC = (long long)dA; break; case JIM_EXPROP_FUNC_ROUND: wC = (long long)(dA < 0 ? (dA - 0.5) : (dA + 0.5)); break; case JIM_EXPROP_FUNC_DOUBLE: case JIM_EXPROP_UNARYPLUS: dC = dA; intresult = 0; break; case JIM_EXPROP_FUNC_ABS: dC = (dA >= 0 ? dA : -dA); intresult = 0; break; case JIM_EXPROP_UNARYMINUS: dC = -dA; intresult = 0; break; case JIM_EXPROP_NOT: wC = !dA; break; default: abort(); } if (rc == JIM_OK) if (intresult) ExprPush(e, Jim_NewIntObj(interp, wC)); else ExprPush(e, Jim_NewDoubleObj(interp, dC)); Jim_DecrRefCount(interp, A); return rc; } static __device__ double JimRandDouble(Jim_Interp *interp) { unsigned long x; JimRandomBytes(interp, &x, sizeof(x)); return (double)x / (unsigned long)~0; } static __device__ int JimExprOpIntUnary(Jim_Interp *interp, struct JimExprState *e) { Jim_Obj *A = ExprPop(e); jim_wide wA; int rc = Jim_GetWide(interp, A, &wA); if (rc == JIM_OK) switch (e->opcode) { case JIM_EXPROP_BITNOT: ExprPush(e, Jim_NewIntObj(interp, ~wA)); break; case JIM_EXPROP_FUNC_SRAND: JimPrngSeed(interp, (unsigned char *)&wA, sizeof(wA)); ExprPush(e, Jim_NewDoubleObj(interp, JimRandDouble(interp))); break; default: abort(); } Jim_DecrRefCount(interp, A); return rc; } static __device__ int JimExprOpNone(Jim_Interp *interp, struct JimExprState *e) { JimPanic(e->opcode != JIM_EXPROP_FUNC_RAND, "JimExprOpNone only support rand()"); ExprPush(e, Jim_NewDoubleObj(interp, JimRandDouble(interp))); return JIM_OK; } #ifdef JIM_MATH_FUNCTIONS static __device__ int JimExprOpDoubleUnary(Jim_Interp *interp, struct JimExprState *e) { Jim_Obj *A = ExprPop(e); double dA, dC; int rc = Jim_GetDouble(interp, A, &dA); if (rc == JIM_OK) { switch (e->opcode) { case JIM_EXPROP_FUNC_SIN: dC = sin(dA); break; case JIM_EXPROP_FUNC_COS: dC = cos(dA); break; case JIM_EXPROP_FUNC_TAN: dC = tan(dA); break; case JIM_EXPROP_FUNC_ASIN: dC = asin(dA); break; case JIM_EXPROP_FUNC_ACOS: dC = acos(dA); break; case JIM_EXPROP_FUNC_ATAN: dC = atan(dA); break; case JIM_EXPROP_FUNC_SINH: dC = sinh(dA); break; case JIM_EXPROP_FUNC_COSH: dC = cosh(dA); break; case JIM_EXPROP_FUNC_TANH: dC = tanh(dA); break; case JIM_EXPROP_FUNC_CEIL: dC = ceil(dA); break; case JIM_EXPROP_FUNC_FLOOR: dC = floor(dA); break; case JIM_EXPROP_FUNC_EXP: dC = exp(dA); break; case JIM_EXPROP_FUNC_LOG: dC = log(dA); break; case JIM_EXPROP_FUNC_LOG10: dC = log10(dA); break; case JIM_EXPROP_FUNC_SQRT: dC = sqrt(dA); break; default: abort(); } ExprPush(e, Jim_NewDoubleObj(interp, dC)); } Jim_DecrRefCount(interp, A); return rc; } #endif // A binary operation on two ints static __device__ int JimExprOpIntBin(Jim_Interp *interp, struct JimExprState *e) { Jim_Obj *B = ExprPop(e); Jim_Obj *A = ExprPop(e); jim_wide wA, wB; int 
rc = JIM_ERROR; if (Jim_GetWide(interp, A, &wA) == JIM_OK && Jim_GetWide(interp, B, &wB) == JIM_OK) { jim_wide wC; rc = JIM_OK; switch (e->opcode) { case JIM_EXPROP_LSHIFT: wC = wA << wB; break; case JIM_EXPROP_RSHIFT: wC = wA >> wB; break; case JIM_EXPROP_BITAND: wC = wA & wB; break; case JIM_EXPROP_BITXOR: wC = wA ^ wB; break; case JIM_EXPROP_BITOR: wC = wA | wB; break; case JIM_EXPROP_MOD: if (wB == 0) { wC = 0; Jim_SetResultString(interp, "Division by zero", -1); rc = JIM_ERROR; } else { // From Tcl 8.x // This code is tricky: C doesn't guarantee much about the quotient or remainder, but Tcl does. // The remainder always has the same sign as the divisor and a smaller absolute value. int negative = 0; if (wB < 0) { wB = -wB; wA = -wA; negative = 1; } wC = wA % wB; if (wC < 0) wC += wB; if (negative) wC = -wC; } break; case JIM_EXPROP_ROTL: case JIM_EXPROP_ROTR: { // uint32_t would be better. But not everyone has inttypes.h? unsigned long uA = (unsigned long)wA; unsigned long uB = (unsigned long)wB; const unsigned int S = sizeof(unsigned long) * 8; // Shift left by the word size or more is undefined uB %= S; if (e->opcode == JIM_EXPROP_ROTR) uB = S - uB; wC = (unsigned long)(uA << uB) | (uA >> (S - uB)); break; } default: abort(); } ExprPush(e, Jim_NewIntObj(interp, wC)); } Jim_DecrRefCount(interp, A); Jim_DecrRefCount(interp, B); return rc; } // A binary operation on two ints or two doubles (or two strings for some ops) static __device__ int JimExprOpBin(Jim_Interp *interp, struct JimExprState *e) { int intresult = 1; int rc = JIM_OK; double dA, dB, dC = 0; jim_wide wA, wB, wC = 0; Jim_Obj *B = ExprPop(e); Jim_Obj *A = ExprPop(e); // Both are ints if ((A->typePtr != &_doubleObjType || A->bytes) && (B->typePtr != &_doubleObjType || B->bytes) && JimGetWideNoErr(interp, A, &wA) == JIM_OK && JimGetWideNoErr(interp, B, &wB) == JIM_OK) switch (e->opcode) { case JIM_EXPROP_POW: case JIM_EXPROP_FUNC_POW: wC = JimPowWide(wA, wB); break; case JIM_EXPROP_ADD: wC = wA + wB; break; case JIM_EXPROP_SUB: wC = wA - wB; break; case JIM_EXPROP_MUL: wC = wA * wB; break; case JIM_EXPROP_DIV: if (wB == 0) { Jim_SetResultString(interp, "Division by zero", -1); rc = JIM_ERROR; } else { // From Tcl 8.x // This code is tricky: C doesn't guarantee much about the quotient or remainder, but Tcl does. // The remainder always has the same sign as the divisor and a smaller absolute value. if (wB < 0) { wB = -wB; wA = -wA; } wC = wA / wB; if (wA % wB < 0) wC--; } break; case JIM_EXPROP_LT: wC = wA < wB; break; case JIM_EXPROP_GT: wC = wA > wB; break; case JIM_EXPROP_LTE: wC = wA <= wB; break; case JIM_EXPROP_GTE: wC = wA >= wB; break; case JIM_EXPROP_NUMEQ: wC = wA == wB; break; case JIM_EXPROP_NUMNE: wC = wA != wB; break; default: abort(); } else if (Jim_GetDouble(interp, A, &dA) == JIM_OK && Jim_GetDouble(interp, B, &dB) == JIM_OK) { intresult = 0; switch (e->opcode) { case JIM_EXPROP_POW: case JIM_EXPROP_FUNC_POW: #ifdef JIM_MATH_FUNCTIONS dC = pow(dA, dB); #else Jim_SetResultString(interp, "unsupported", -1); rc = JIM_ERROR; #endif break; case JIM_EXPROP_ADD: dC = dA + dB; break; case JIM_EXPROP_SUB: dC = dA - dB; break; case JIM_EXPROP_MUL: dC = dA * dB; break; case JIM_EXPROP_DIV: if (dB == 0) { #ifdef INFINITY dC = dA < 0 ? -INFINITY : INFINITY; #else dC = (dA < 0 ? 
-1.0 : 1.0) * strtod("Inf", NULL); #endif } else dC = dA / dB; break; case JIM_EXPROP_LT: wC = dA < dB; intresult = 1; break; case JIM_EXPROP_GT: wC = dA > dB; intresult = 1; break; case JIM_EXPROP_LTE: wC = dA <= dB; intresult = 1; break; case JIM_EXPROP_GTE: wC = dA >= dB; intresult = 1; break; case JIM_EXPROP_NUMEQ: wC = dA == dB; intresult = 1; break; case JIM_EXPROP_NUMNE: wC = dA != dB; intresult = 1; break; default: abort(); } } else { // Handle the string case // XXX: Could optimise the eq/ne case by checking lengths int i = Jim_StringCompareObj(interp, A, B, 0); switch (e->opcode) { case JIM_EXPROP_LT: wC = i < 0; break; case JIM_EXPROP_GT: wC = i > 0; break; case JIM_EXPROP_LTE: wC = i <= 0; break; case JIM_EXPROP_GTE: wC = i >= 0; break; case JIM_EXPROP_NUMEQ: wC = i == 0; break; case JIM_EXPROP_NUMNE: wC = i != 0; break; default: rc = JIM_ERROR; break; } } if (rc == JIM_OK) if (intresult) ExprPush(e, Jim_NewIntObj(interp, wC)); else ExprPush(e, Jim_NewDoubleObj(interp, dC)); Jim_DecrRefCount(interp, A); Jim_DecrRefCount(interp, B); return rc; } static __device__ int JimSearchList(Jim_Interp *interp, Jim_Obj *listObjPtr, Jim_Obj *valObj) { int listlen = Jim_ListLength(interp, listObjPtr); for (int i = 0; i < listlen; i++) if (Jim_StringEqObj(Jim_ListGetIndex(interp, listObjPtr, i), valObj)) return 1; return 0; } static __device__ int JimExprOpStrBin(Jim_Interp *interp, struct JimExprState *e) { Jim_Obj *B = ExprPop(e); Jim_Obj *A = ExprPop(e); jim_wide wC; switch (e->opcode) { case JIM_EXPROP_STREQ: case JIM_EXPROP_STRNE: wC = Jim_StringEqObj(A, B); if (e->opcode == JIM_EXPROP_STRNE) wC = !wC; break; case JIM_EXPROP_STRIN: wC = JimSearchList(interp, B, A); break; case JIM_EXPROP_STRNI: wC = !JimSearchList(interp, B, A); break; default: abort(); } ExprPush(e, Jim_NewIntObj(interp, wC)); Jim_DecrRefCount(interp, A); Jim_DecrRefCount(interp, B); return JIM_OK; } static __device__ int ExprBool(Jim_Interp *interp, Jim_Obj *obj) { long l; double d; if (Jim_GetLong(interp, obj, &l) == JIM_OK) return l != 0; if (Jim_GetDouble(interp, obj, &d) == JIM_OK) return d != 0; return -1; } static __device__ int JimExprOpAndLeft(Jim_Interp *interp, struct JimExprState *e) { Jim_Obj *skip = ExprPop(e); Jim_Obj *A = ExprPop(e); int rc = JIM_OK; switch (ExprBool(interp, A)) { case 0: // false, so skip RHS opcodes with a 0 result e->skip = (int)JimWideValue(skip); ExprPush(e, Jim_NewIntObj(interp, 0)); break; case 1: break; // true so continue case -1: rc = JIM_ERROR; break; // Invalid } Jim_DecrRefCount(interp, A); Jim_DecrRefCount(interp, skip); return rc; } static __device__ int JimExprOpOrLeft(Jim_Interp *interp, struct JimExprState *e) { Jim_Obj *skip = ExprPop(e); Jim_Obj *A = ExprPop(e); int rc = JIM_OK; switch (ExprBool(interp, A)) { case 0: break; // false, so do nothing case 1: // true so skip RHS opcodes with a 1 result e->skip = (int)JimWideValue(skip); ExprPush(e, Jim_NewIntObj(interp, 1)); break; case -1: rc = JIM_ERROR; break; // Invalid } Jim_DecrRefCount(interp, A); Jim_DecrRefCount(interp, skip); return rc; } static __device__ int JimExprOpAndOrRight(Jim_Interp *interp, struct JimExprState *e) { Jim_Obj *A = ExprPop(e); int rc = JIM_OK; switch (ExprBool(interp, A)) { case 0: ExprPush(e, Jim_NewIntObj(interp, 0)); break; case 1: ExprPush(e, Jim_NewIntObj(interp, 1)); break; case -1: rc = JIM_ERROR; break; // Invalid } Jim_DecrRefCount(interp, A); return rc; } static __device__ int JimExprOpTernaryLeft(Jim_Interp *interp, struct JimExprState *e) { Jim_Obj *skip = ExprPop(e); Jim_Obj 
*A = ExprPop(e); int rc = JIM_OK; // Repush A ExprPush(e, A); switch (ExprBool(interp, A)) { case 0: // false, skip RHS opcodes e->skip = (int)JimWideValue(skip); // Push a dummy value ExprPush(e, Jim_NewIntObj(interp, 0)); break; case 1: break; // true so do nothing case -1: rc = JIM_ERROR; break; // Invalid } Jim_DecrRefCount(interp, A); Jim_DecrRefCount(interp, skip); return rc; } static __device__ int JimExprOpColonLeft(Jim_Interp *interp, struct JimExprState *e) { Jim_Obj *skip = ExprPop(e); Jim_Obj *B = ExprPop(e); Jim_Obj *A = ExprPop(e); // No need to check for A as non-boolean if (ExprBool(interp, A)) { // true, so skip RHS opcodes e->skip = (int)JimWideValue(skip); // Repush B as the answer ExprPush(e, B); } Jim_DecrRefCount(interp, skip); Jim_DecrRefCount(interp, A); Jim_DecrRefCount(interp, B); return JIM_OK; } static __device__ int JimExprOpNull(Jim_Interp *interp, struct JimExprState *e) { return JIM_OK; } enum { LAZY_NONE, LAZY_OP, LAZY_LEFT, LAZY_RIGHT }; // name - precedence - arity - opcode // This array *must* be kept in sync with the JIM_EXPROP enum. // The following macros pre-compute the string length at compile time. #define OPRINIT(N, P, A, F) {N, F, P, A, LAZY_NONE, sizeof(N) - 1} #define OPRINIT_LAZY(N, P, A, F, L) {N, F, P, A, L, sizeof(N) - 1} __constant__ static const struct Jim_ExprOperator Jim_ExprOperators[] = { OPRINIT("*", 110, 2, JimExprOpBin), OPRINIT("/", 110, 2, JimExprOpBin), OPRINIT("%", 110, 2, JimExprOpIntBin), // OPRINIT("-", 100, 2, JimExprOpBin), OPRINIT("+", 100, 2, JimExprOpBin), // OPRINIT("<<", 90, 2, JimExprOpIntBin), OPRINIT(">>", 90, 2, JimExprOpIntBin), // OPRINIT("<<<", 90, 2, JimExprOpIntBin), OPRINIT(">>>", 90, 2, JimExprOpIntBin), // OPRINIT("<", 80, 2, JimExprOpBin), OPRINIT(">", 80, 2, JimExprOpBin), OPRINIT("<=", 80, 2, JimExprOpBin), OPRINIT(">=", 80, 2, JimExprOpBin), // OPRINIT("==", 70, 2, JimExprOpBin), OPRINIT("!=", 70, 2, JimExprOpBin), // OPRINIT("&", 50, 2, JimExprOpIntBin), OPRINIT("^", 49, 2, JimExprOpIntBin), OPRINIT("|", 48, 2, JimExprOpIntBin), // OPRINIT_LAZY("&&", 10, 2, NULL, LAZY_OP), OPRINIT_LAZY(NULL, 10, 2, JimExprOpAndLeft, LAZY_LEFT), OPRINIT_LAZY(NULL, 10, 2, JimExprOpAndOrRight, LAZY_RIGHT), // OPRINIT_LAZY("||", 9, 2, NULL, LAZY_OP), OPRINIT_LAZY(NULL, 9, 2, JimExprOpOrLeft, LAZY_LEFT), OPRINIT_LAZY(NULL, 9, 2, JimExprOpAndOrRight, LAZY_RIGHT), // OPRINIT_LAZY("?", 5, 2, JimExprOpNull, LAZY_OP), OPRINIT_LAZY(NULL, 5, 2, JimExprOpTernaryLeft, LAZY_LEFT), OPRINIT_LAZY(NULL, 5, 2, JimExprOpNull, LAZY_RIGHT), // OPRINIT_LAZY(":", 5, 2, JimExprOpNull, LAZY_OP), OPRINIT_LAZY(NULL, 5, 2, JimExprOpColonLeft, LAZY_LEFT), OPRINIT_LAZY(NULL, 5, 2, JimExprOpNull, LAZY_RIGHT), // OPRINIT("**", 250, 2, JimExprOpBin), // OPRINIT("eq", 60, 2, JimExprOpStrBin), OPRINIT("ne", 60, 2, JimExprOpStrBin), // OPRINIT("in", 55, 2, JimExprOpStrBin), OPRINIT("ni", 55, 2, JimExprOpStrBin), // OPRINIT("!", 150, 1, JimExprOpNumUnary), OPRINIT("~", 150, 1, JimExprOpIntUnary), OPRINIT(NULL, 150, 1, JimExprOpNumUnary), OPRINIT(NULL, 150, 1, JimExprOpNumUnary), // OPRINIT("int", 200, 1, JimExprOpNumUnary), OPRINIT("wide", 200, 1, JimExprOpNumUnary), OPRINIT("abs", 200, 1, JimExprOpNumUnary), OPRINIT("double", 200, 1, JimExprOpNumUnary), OPRINIT("round", 200, 1, JimExprOpNumUnary), OPRINIT("rand", 200, 0, JimExprOpNone), OPRINIT("srand", 200, 1, JimExprOpIntUnary), // #ifdef JIM_MATH_FUNCTIONS OPRINIT("sin", 200, 1, JimExprOpDoubleUnary), OPRINIT("cos", 200, 1, JimExprOpDoubleUnary), OPRINIT("tan", 200, 1, JimExprOpDoubleUnary), 
OPRINIT("asin", 200, 1, JimExprOpDoubleUnary), OPRINIT("acos", 200, 1, JimExprOpDoubleUnary), OPRINIT("atan", 200, 1, JimExprOpDoubleUnary), OPRINIT("sinh", 200, 1, JimExprOpDoubleUnary), OPRINIT("cosh", 200, 1, JimExprOpDoubleUnary), OPRINIT("tanh", 200, 1, JimExprOpDoubleUnary), OPRINIT("ceil", 200, 1, JimExprOpDoubleUnary), OPRINIT("floor", 200, 1, JimExprOpDoubleUnary), OPRINIT("exp", 200, 1, JimExprOpDoubleUnary), OPRINIT("log", 200, 1, JimExprOpDoubleUnary), OPRINIT("log10", 200, 1, JimExprOpDoubleUnary), OPRINIT("sqrt", 200, 1, JimExprOpDoubleUnary), OPRINIT("pow", 200, 2, JimExprOpBin), #endif }; #undef OPRINIT #undef OPRINIT_LAZY #define JIM_EXPR_OPERATORS_NUM (sizeof(Jim_ExprOperators)/sizeof(struct Jim_ExprOperator)) static __device__ int JimParseExpression(struct JimParserCtx *pc) { // Discard spaces and quoted newline while (isspace(*pc->p) || (*(pc->p) == '\\' && *(pc->p + 1) == '\n')) { if (*pc->p == '\n') pc->linenr++; pc->p++; pc->len--; } // Common case pc->tline = pc->linenr; pc->tstart = pc->p; if (pc->len == 0) { pc->tend = pc->p; pc->tt = JIM_TT_EOL; pc->eof = 1; return JIM_OK; } switch (*(pc->p)) { case '(': pc->tt = JIM_TT_SUBEXPR_START; goto singlechar; case ')': pc->tt = JIM_TT_SUBEXPR_END; goto singlechar; case ',': pc->tt = JIM_TT_SUBEXPR_COMMA; singlechar: pc->tend = pc->p; pc->p++; pc->len--; break; case '[': return JimParseCmd(pc); case '$': if (JimParseVar(pc) == JIM_ERROR) return JimParseExprOperator(pc); else return (pc->tt == JIM_TT_EXPRSUGAR ? JIM_ERROR : JIM_OK); // Don't allow expr sugar in expressions case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case '.': return JimParseExprNumber(pc); case '"': return JimParseQuote(pc); case '{': return JimParseBrace(pc); case 'N': case 'I': case 'n': case 'i': if (JimParseExprIrrational(pc) == JIM_ERROR) return JimParseExprOperator(pc); break; default: return JimParseExprOperator(pc); } return JIM_OK; } static __device__ int JimParseExprNumber(struct JimParserCtx *pc) { // Assume an integer for now pc->tt = JIM_TT_EXPR_INT; jim_strtoull(pc->p, (char **)&pc->p); // Tried as an integer, but perhaps it parses as a double if (strchr("eENnIi.", *pc->p) || pc->p == pc->tstart) { // Some stupid compilers insist they are cleverer than we are. Even a (void) cast doesn't prevent this warning! 
char *end; if (strtod(pc->tstart, &end)) { } // nothing if (end == pc->tstart) return JIM_ERROR; if (end > pc->p) { // Yes, double captured more chars pc->tt = JIM_TT_EXPR_DOUBLE; pc->p = end; } } pc->tend = pc->p - 1; pc->len -= (int)(pc->p - pc->tstart); return JIM_OK; } static __device__ int JimParseExprIrrational(struct JimParserCtx *pc) { const char *irrationals[] = { "NaN", "nan", "NAN", "Inf", "inf", "INF", NULL }; for (int i = 0; irrationals[i]; i++) { const char *irr = irrationals[i]; if (!strncmp(irr, pc->p, 3)) { pc->p += 3; pc->len -= 3; pc->tend = pc->p - 1; pc->tt = JIM_TT_EXPR_DOUBLE; return JIM_OK; } } return JIM_ERROR; } static __device__ int JimParseExprOperator(struct JimParserCtx *pc) { // Try to get the longest match int bestIdx = -1, bestLen = 0; for (int i = 0; i < (signed)JIM_EXPR_OPERATORS_NUM; i++) { const char * const opname = Jim_ExprOperators[i].name; const int oplen = Jim_ExprOperators[i].namelen; if (opname == NULL || opname[0] != pc->p[0]) continue; if (oplen > bestLen && !strncmp(opname, pc->p, oplen)) { bestIdx = i + JIM_TT_EXPR_OP; bestLen = oplen; } } if (bestIdx == -1) return JIM_ERROR; // Validate parentheses around function arguments if (bestIdx >= JIM_EXPROP_FUNC_FIRST) { const char *p = pc->p + bestLen; int len = pc->len - bestLen; while (len && isspace(*p)) { len--; p++; } if (*p != '(') return JIM_ERROR; } pc->tend = pc->p + bestLen - 1; pc->p += bestLen; pc->len -= bestLen; pc->tt = bestIdx; return JIM_OK; } __constant__ static Jim_ExprOperator _dummy_op; static __device__ const struct Jim_ExprOperator *JimExprOperatorInfoByOpcode(int opcode) { return (opcode < JIM_TT_EXPR_OP ? &_dummy_op : &Jim_ExprOperators[opcode - JIM_TT_EXPR_OP]); } __constant__ static const char * const _tt_names[JIM_TT_EXPR_OP] = { "NIL", "STR", "ESC", "VAR", "ARY", "CMD", "SEP", "EOL", "EOF", "LIN", "WRD", "(((", ")))", ",,,", "INT", "DBL", "$()" }; #ifdef __HIPCC__ __device__ char _jim_tt_name_buf[20]; #endif __device__ const char *jim_tt_name(int type) { if (type < JIM_TT_EXPR_OP) return _tt_names[type]; const struct Jim_ExprOperator *op = JimExprOperatorInfoByOpcode(type); #ifndef __HIPCC__ static char _jim_tt_name_buf[20]; #endif if (op->name) return op->name; sprintf(_jim_tt_name_buf, "(%d)", type); return _jim_tt_name_buf; } #pragma endregion // ----------------------------------------------------------------------------- // Expression Object // ----------------------------------------------------------------------------- #pragma region Expression Object static __device__ void FreeExprInternalRep(Jim_Interp *interp, Jim_Obj *objPtr); static __device__ void DupExprInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr); static __device__ int SetExprFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr); __constant__ static const Jim_ObjType _exprObjType = { "expression", FreeExprInternalRep, DupExprInternalRep, NULL, JIM_TYPE_REFERENCES, }; // Expr bytecode structure typedef struct ExprByteCode { ScriptToken *token; // Tokens array int len; // Length as number of tokens int inUse; // Used for sharing } ExprByteCode; static __device__ void ExprFreeByteCode(Jim_Interp *interp, ExprByteCode *expr) { for (int i = 0; i < expr->len; i++) Jim_DecrRefCount(interp, expr->token[i].objPtr); Jim_Free(expr->token); Jim_Free(expr); } static __device__ void FreeExprInternalRep(Jim_Interp *interp, Jim_Obj *objPtr) { ExprByteCode *expr = (ExprByteCode *)objPtr->internalRep.ptr; if (expr) { if (--expr->inUse != 0) return; ExprFreeByteCode(interp, expr); } } static __device__ 
void DupExprInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr) { JIM_NOTUSED(interp); JIM_NOTUSED(srcPtr); // Just returns a simple string dupPtr->typePtr = NULL; } // Check if an expr program looks correct static __device__ int ExprCheckCorrectness(ExprByteCode *expr) { int stacklen = 0; int ternary = 0; // Try to check if there are stack underflows, and make sure at the end of the program there is a single result on the stack. for (int i = 0; i < expr->len; i++) { ScriptToken *t = &expr->token[i]; const struct Jim_ExprOperator *op = JimExprOperatorInfoByOpcode(t->type); stacklen -= op->arity; if (stacklen < 0) break; if (t->type == JIM_EXPROP_TERNARY || t->type == JIM_EXPROP_TERNARY_LEFT) ternary++; else if (t->type == JIM_EXPROP_COLON || t->type == JIM_EXPROP_COLON_LEFT) ternary--; // All operations and operands add one to the stack stacklen++; } return (stacklen != 1 || ternary != 0 ? JIM_ERROR : JIM_OK); } // This procedure converts every occurrence of the || and && operators into lazy unary versions. // a b || is converted into: // // a <offset> |L b |R // // a b && is converted into: // // a <offset> &L b &R // // "|L" checks if 'a' is true: // 1) if it is true pushes 1 and skips <offset> instructions to reach the opcode just after |R. // 2) if it is false does nothing. // "|R" checks if 'b' is true: // 1) if it is true pushes 1, otherwise pushes 0. // // "&L" checks if 'a' is true: // 1) if it is true does nothing. // 2) If it is false pushes 0 and skips <offset> instructions to reach the opcode just after &R // "&R" checks if 'b' is true: // if it is true pushes 1, otherwise pushes 0. static __device__ int ExprAddLazyOperator(Jim_Interp *interp, ExprByteCode *expr, ParseToken *t) { // Search for the end of the first operator int leftindex = expr->len - 1; int arity = 1; while (arity) { ScriptToken *tt = &expr->token[leftindex]; if (tt->type >= JIM_TT_EXPR_OP) arity += JimExprOperatorInfoByOpcode(tt->type)->arity; arity--; if (--leftindex < 0) return JIM_ERROR; } leftindex++; // Move them up memmove(&expr->token[leftindex + 2], &expr->token[leftindex], sizeof(*expr->token) * (expr->len - leftindex)); expr->len += 2; int offset = (expr->len - leftindex) - 1; // Now we rely on the fact that the left and right versions have opcodes 1 and 2 after the main opcode respectively expr->token[leftindex + 1].type = t->type + 1; expr->token[leftindex + 1].objPtr = interp->emptyObj; expr->token[leftindex].type = JIM_TT_EXPR_INT; expr->token[leftindex].objPtr = Jim_NewIntObj(interp, offset); // Now add the 'R' operator expr->token[expr->len].objPtr = interp->emptyObj; expr->token[expr->len].type = t->type + 2; expr->len++; // Do we need to adjust the skip count for any &L, |L, ?L or :L in the left operand? 
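// Worked example (illustrative, not in the original source): "$a || $b" parses to the postfix tokens
//   $a $b ||
// and ExprAddLazyOperator rewrites them to
//   $a <offset> |L $b |R
// Inserting the two tokens <offset> and |L shifts the left operand's own lazy opcodes up by two slots,
// so any skip count in there whose jump target lands at or beyond leftindex must also grow by two.
// The loop below performs exactly that adjustment.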
for (int i = leftindex - 1; i > 0; i--) { const struct Jim_ExprOperator *op = JimExprOperatorInfoByOpcode(expr->token[i].type); if (op->lazy == LAZY_LEFT) if (JimWideValue(expr->token[i - 1].objPtr) + i - 1 >= leftindex) JimWideValue(expr->token[i - 1].objPtr) += 2; } return JIM_OK; } static __device__ int ExprAddOperator(Jim_Interp *interp, ExprByteCode * expr, ParseToken *t) { struct ScriptToken *token = &expr->token[expr->len]; const struct Jim_ExprOperator *op = JimExprOperatorInfoByOpcode(t->type); if (op->lazy == LAZY_OP) { if (ExprAddLazyOperator(interp, expr, t) != JIM_OK) { Jim_SetResultFormatted(interp, "Expression has bad operands to %s", op->name); return JIM_ERROR; } } else { token->objPtr = interp->emptyObj; token->type = t->type; expr->len++; } return JIM_OK; } // Returns the index of the COLON_LEFT to the left of 'right_index' taking into account nesting. // The expression *must* be well formed, thus a COLON_LEFT will always be found. static __device__ int ExprTernaryGetColonLeftIndex(ExprByteCode *expr, int right_index) { int ternary_count = 1; right_index--; while (right_index > 1) { if (expr->token[right_index].type == JIM_EXPROP_TERNARY_LEFT) ternary_count--; else if (expr->token[right_index].type == JIM_EXPROP_COLON_RIGHT) ternary_count++; else if (expr->token[right_index].type == JIM_EXPROP_COLON_LEFT && ternary_count == 1) return right_index; right_index--; } return -1; // notreached } // Find the left/right indices for the ternary expression to the left of 'right_index'. // Returns 1 if found, and fills in *prev_right_index and *prev_left_index. Otherwise returns 0. static __device__ int ExprTernaryGetMoveIndices(ExprByteCode *expr, int right_index, int *prev_right_index, int *prev_left_index) { int i = right_index - 1; int ternary_count = 1; while (i > 1) { if (expr->token[i].type == JIM_EXPROP_TERNARY_LEFT) { if (--ternary_count == 0 && expr->token[i - 2].type == JIM_EXPROP_COLON_RIGHT) { *prev_right_index = i - 2; *prev_left_index = ExprTernaryGetColonLeftIndex(expr, *prev_right_index); return 1; } } else if (expr->token[i].type == JIM_EXPROP_COLON_RIGHT) { if (ternary_count == 0) return 0; ternary_count++; } i--; } return 0; } // ExprTernaryReorderExpression description // ======================================== // ?: is right-to-left associative which doesn't work with the stack-based expression engine. The fix is to reorder the bytecode. // // The expression: // expr 1?2:0?3:4 // // Has initial bytecode: // '1' '2' (40=TERNARY_LEFT) '2' (41=TERNARY_RIGHT) '2' (43=COLON_LEFT) '0' (44=COLON_RIGHT) // '2' (40=TERNARY_LEFT) '3' (41=TERNARY_RIGHT) '2' (43=COLON_LEFT) '4' (44=COLON_RIGHT) // // The fix involves simulating this expression instead: // expr 1?2:(0?3:4) // // With the following bytecode: // '1' '2' (40=TERNARY_LEFT) '2' (41=TERNARY_RIGHT) '10' (43=COLON_LEFT) '0' '2' (40=TERNARY_LEFT) // '3' (41=TERNARY_RIGHT) '2' (43=COLON_LEFT) '4' (44=COLON_RIGHT) (44=COLON_RIGHT) // // i.e. The token COLON_RIGHT at index 8 is moved towards the end of the stack, all tokens above 8 are shifted down and the skip count of the token JIM_EXPROP_COLON_LEFT at index 5 is // incremented by the amount the tokens are shifted down. 
The token JIM_EXPROP_COLON_RIGHT that is moved is identified as immediately preceding a token JIM_EXPROP_TERNARY_LEFT // // ExprTernaryReorderExpression thus works as follows: // - start from the end of the stack // - while walking towards the beginning of the stack // if token=JIM_EXPROP_COLON_RIGHT then // find the associated token JIM_EXPROP_TERNARY_LEFT, which allows to // find the associated token previous(JIM_EXPROP_COLON_RIGHT) // find the associated token previous(JIM_EXPROP_LEFT_RIGHT) // if all found then // perform the rotation // update the skip count of the token previous(JIM_EXPROP_LEFT_RIGHT) // end if // end if // // Note: care has to be taken for nested ternary constructs!!! static __device__ void ExprTernaryReorderExpression(Jim_Interp *interp, ExprByteCode *expr) { for (int i = expr->len - 1; i > 1; i--) { if (expr->token[i].type != JIM_EXPROP_COLON_RIGHT) continue; // COLON_RIGHT found: get the indexes needed to move the tokens in the stack (if any) int prev_right_index; int prev_left_index; if (ExprTernaryGetMoveIndices(expr, i, &prev_right_index, &prev_left_index) == 0) continue; // rotate tokens down // // +-> [i] : JIM_EXPROP_COLON_RIGHT // | | | // | V V // | [...] : ... // | | | // | V V // | [...] : ... // | | | // | V V // +- [prev_right_index] : JIM_EXPROP_COLON_RIGHT ScriptToken tmp = expr->token[prev_right_index]; for (int j = prev_right_index; j < i; j++) expr->token[j] = expr->token[j + 1]; expr->token[i] = tmp; // Increment the 'skip' count associated with the previous JIM_EXPROP_COLON_LEFT token // This is 'colon left increment' = i - prev_right_index // [prev_left_index] : JIM_EXPROP_LEFT_RIGHT // [prev_left_index-1] : skip_count JimWideValue(expr->token[prev_left_index-1].objPtr) += (i - prev_right_index); // Adjust for i-- in the loop i++; } } static __device__ ExprByteCode *ExprCreateByteCode(Jim_Interp *interp, const ParseTokenList *tokenlist, Jim_Obj *fileNameObj) { int ok = 1; int i; int prevtt = JIM_TT_NONE; int have_ternary = 0; // -1 for EOL int count = tokenlist->count - 1; ExprByteCode *expr = (ExprByteCode *)Jim_Alloc(sizeof(*expr)); expr->inUse = 1; expr->len = 0; Jim_Stack stack; Jim_InitStack(&stack); // Need extra bytecodes for lazy operators. 
Also check for the ternary operator for (i = 0; i < tokenlist->count; i++) { ParseToken *t = &tokenlist->list[i]; const struct Jim_ExprOperator *op = JimExprOperatorInfoByOpcode(t->type); if (op->lazy == LAZY_OP) { count += 2; // Ternary is a lazy op but also needs reordering if (t->type == JIM_EXPROP_TERNARY) have_ternary = 1; } } expr->token = (ScriptToken *)Jim_Alloc(sizeof(ScriptToken) * count); for (i = 0; i < tokenlist->count && ok; i++) { ParseToken *t = &tokenlist->list[i]; // Next token will be stored here struct ScriptToken *token = &expr->token[expr->len]; if (t->type == JIM_TT_EOL) break; switch (t->type) { case JIM_TT_STR: case JIM_TT_ESC: case JIM_TT_VAR: case JIM_TT_DICTSUGAR: case JIM_TT_EXPRSUGAR: case JIM_TT_CMD: token->type = t->type; strexpr: token->objPtr = Jim_NewStringObj(interp, t->token, t->len); // Only commands need source info if (t->type == JIM_TT_CMD) JimSetSourceInfo(interp, token->objPtr, fileNameObj, t->line); expr->len++; break; case JIM_TT_EXPR_INT: case JIM_TT_EXPR_DOUBLE: { char *endptr; if (t->type == JIM_TT_EXPR_INT) token->objPtr = Jim_NewIntObj(interp, jim_strtoull(t->token, &endptr)); else token->objPtr = Jim_NewDoubleObj(interp, strtod(t->token, &endptr)); if (endptr != t->token + t->len) { // Conversion failed, so just store it as a string Jim_FreeNewObj(interp, token->objPtr); token->type = JIM_TT_STR; goto strexpr; } token->type = t->type; expr->len++; break; } case JIM_TT_SUBEXPR_START: Jim_StackPush(&stack, t); prevtt = JIM_TT_NONE; continue; case JIM_TT_SUBEXPR_COMMA: continue; // Simple approach. Comma is simply ignored case JIM_TT_SUBEXPR_END: ok = 0; while (Jim_StackLen(&stack)) { ParseToken *tt = (ParseToken *)Jim_StackPop(&stack); if (tt->type == JIM_TT_SUBEXPR_START) { ok = 1; break; } if (ExprAddOperator(interp, expr, tt) != JIM_OK) goto err; } if (!ok) { Jim_SetResultString(interp, "Unexpected close parenthesis", -1); goto err; } break; default: { // Must be an operator // Convert -/+ to unary minus or unary plus if necessary if (prevtt == JIM_TT_NONE || prevtt >= JIM_TT_EXPR_OP) { if (t->type == JIM_EXPROP_SUB) t->type = JIM_EXPROP_UNARYMINUS; else if (t->type == JIM_EXPROP_ADD) t->type = JIM_EXPROP_UNARYPLUS; } const struct Jim_ExprOperator *op = JimExprOperatorInfoByOpcode(t->type); // Now handle precedence ParseToken *tt; while ((tt = (ParseToken *)Jim_StackPeek(&stack)) != NULL) { const struct Jim_ExprOperator *tt_op = JimExprOperatorInfoByOpcode(tt->type); // Note that right-to-left associativity of ?: operator is handled later if (op->arity != 1 && tt_op->precedence >= op->precedence) { if (ExprAddOperator(interp, expr, tt) != JIM_OK) { ok = 0; goto err; } Jim_StackPop(&stack); } else break; } Jim_StackPush(&stack, t); break; } } prevtt = t->type; } // Reduce any remaining subexpr while (Jim_StackLen(&stack)) { ParseToken *tt = (ParseToken *)Jim_StackPop(&stack); if (tt->type == JIM_TT_SUBEXPR_START) { ok = 0; Jim_SetResultString(interp, "Missing close parenthesis", -1); goto err; } if (ExprAddOperator(interp, expr, tt) != JIM_OK) { ok = 0; goto err; } } if (have_ternary) ExprTernaryReorderExpression(interp, expr); err: // Free the stack used for the compilation Jim_FreeStack(&stack); for (i = 0; i < expr->len; i++) Jim_IncrRefCount(expr->token[i].objPtr); if (!ok) { ExprFreeByteCode(interp, expr); return NULL; } return expr; } // This method takes the string representation of an expression and generates a program for the Expr's stack-based VM. 
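// Compilation sketch (illustrative, not part of the original source): for the expression "1 + 2 * 3"
// the tokenizer emits INT(1) OP(+) INT(2) OP(*) INT(3); because "*" (precedence 110) binds tighter
// than "+" (precedence 100), the precedence loop above yields the postfix program
//   INT(1) INT(2) INT(3) * +
// which the stack VM in Jim_EvalExpression executes left to right, multiplying before adding.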
static __device__ int SetExprFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr) { int rc = JIM_ERROR; // Try to get information about filename / line number int line; Jim_Obj *fileNameObj; if (objPtr->typePtr == &_sourceObjType) { fileNameObj = objPtr->internalRep.sourceValue.fileNameObj; line = objPtr->internalRep.sourceValue.lineNumber; } else { fileNameObj = interp->emptyObj; line = 1; } Jim_IncrRefCount(fileNameObj); int exprTextLen; const char *exprText = Jim_GetString(objPtr, &exprTextLen); // Initially tokenise the expression into tokenlist ParseTokenList tokenlist; ScriptTokenListInit(&tokenlist); struct JimParserCtx parser; JimParserInit(&parser, exprText, exprTextLen, line); struct ExprByteCode *expr; while (!parser.eof) { if (JimParseExpression(&parser) != JIM_OK) { ScriptTokenListFree(&tokenlist); invalidexpr: Jim_SetResultFormatted(interp, "syntax error in expression: \"%#s\"", objPtr); expr = NULL; goto err; } ScriptAddToken(&tokenlist, parser.tstart, (int)(parser.tend - parser.tstart + 1), parser.tt, parser.tline); } #ifdef DEBUG_SHOW_EXPR_TOKENS { printf("==== Expr Tokens (%s) ====\n", Jim_String(fileNameObj)); for (int i = 0; i < tokenlist.count; i++) printf("[%2d]@%d %s '%.*s'\n", i, tokenlist.list[i].line, jim_tt_name(tokenlist.list[i].type), tokenlist.list[i].len, tokenlist.list[i].token); } #endif if (JimParseCheckMissing(interp, parser.missing.ch) == JIM_ERROR) { ScriptTokenListFree(&tokenlist); Jim_DecrRefCount(interp, fileNameObj); return JIM_ERROR; } // Now create the expression bytecode from the tokenlist expr = ExprCreateByteCode(interp, &tokenlist, fileNameObj); // No longer need the token list ScriptTokenListFree(&tokenlist); if (!expr) goto err; #ifdef DEBUG_SHOW_EXPR { printf("==== Expr ====\n"); for (int i = 0; i < expr->len; i++) { ScriptToken *t = &expr->token[i]; printf("[%2d] %s '%s'\n", i, jim_tt_name(t->type), Jim_String(t->objPtr)); } } #endif // Check program correctness if (ExprCheckCorrectness(expr) != JIM_OK) { ExprFreeByteCode(interp, expr); goto invalidexpr; } rc = JIM_OK; err: // Free the old internal rep and set the new one Jim_DecrRefCount(interp, fileNameObj); Jim_FreeIntRep(interp, objPtr); Jim_SetIntRepPtr(objPtr, expr); objPtr->typePtr = &_exprObjType; return rc; } static __device__ ExprByteCode *JimGetExpression(Jim_Interp *interp, Jim_Obj *objPtr) { if (objPtr->typePtr != &_exprObjType) if (SetExprFromAny(interp, objPtr) != JIM_OK) return NULL; return (ExprByteCode *)Jim_GetIntRepPtr(objPtr); } #ifdef JIM_OPTIMIZATION static __device__ Jim_Obj *JimExprIntValOrVar(Jim_Interp *interp, const ScriptToken *token) { if (token->type == JIM_TT_EXPR_INT) return token->objPtr; else if (token->type == JIM_TT_VAR) return Jim_GetVariable(interp, token->objPtr, JIM_NONE); else if (token->type == JIM_TT_DICTSUGAR) return JimExpandDictSugar(interp, token->objPtr); else return NULL; } #endif // ----------------------------------------------------------------------------- // Expressions evaluation. // Jim uses a specialized stack-based virtual machine for expressions that takes advantage of the fact that expr's operators can't be redefined. // Jim_EvalExpression() uses the bytecode compiled by the SetExprFromAny() method of the "expression" object. // On success a Tcl Object containing the result of the evaluation is stored into exprResultPtrPtr (having a refcount of 1), and JIM_OK is returned. // On error the function returns a retcode != JIM_OK and sets a suitable error on the interp. 
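// Usage sketch (illustrative, not part of the original source; names are hypothetical):
//
//   Jim_Obj *exprObj = Jim_NewStringObj(interp, "3 * (2 + 1)", -1);
//   Jim_IncrRefCount(exprObj);
//   Jim_Obj *resultObj;
//   if (Jim_EvalExpression(interp, exprObj, &resultObj) == JIM_OK) {
//       jim_wide w;
//       Jim_GetWide(interp, resultObj, &w);     // w == 9
//       Jim_DecrRefCount(interp, resultObj);    // the result arrives with a refcount of 1
//   }
//   Jim_DecrRefCount(interp, exprObj);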
// ----------------------------------------------------------------------------- #define JIM_EE_STATICSTACK_LEN 10 __device__ int Jim_EvalExpression(Jim_Interp *interp, Jim_Obj *exprObjPtr, Jim_Obj **exprResultPtrPtr) { Jim_Obj *staticStack[JIM_EE_STATICSTACK_LEN]; int i; int retcode = JIM_OK; struct JimExprState e; ExprByteCode *expr = JimGetExpression(interp, exprObjPtr); if (!expr) return JIM_ERROR; // error in expression #ifdef JIM_OPTIMIZATION // Check for one of the following common expressions used by while/for // CONST // $a // !$a // $a < CONST, $a < $b // $a <= CONST, $a <= $b // $a > CONST, $a > $b // $a >= CONST, $a >= $b // $a != CONST, $a != $b // $a == CONST, $a == $b { Jim_Obj *objPtr; // STEP 1 -- Check if there are the conditions to run the specialized version of while switch (expr->len) { case 1: objPtr = JimExprIntValOrVar(interp, &expr->token[0]); if (objPtr) { Jim_IncrRefCount(objPtr); *exprResultPtrPtr = objPtr; return JIM_OK; } break; case 2: if (expr->token[1].type == JIM_EXPROP_NOT) { objPtr = JimExprIntValOrVar(interp, &expr->token[0]); if (objPtr && JimIsWide(objPtr)) { *exprResultPtrPtr = (JimWideValue(objPtr) ? interp->falseObj : interp->trueObj); Jim_IncrRefCount(*exprResultPtrPtr); return JIM_OK; } } break; case 3: objPtr = JimExprIntValOrVar(interp, &expr->token[0]); if (objPtr && JimIsWide(objPtr)) { Jim_Obj *objPtr2 = JimExprIntValOrVar(interp, &expr->token[1]); if (objPtr2 && JimIsWide(objPtr2)) { jim_wide wideValueA = JimWideValue(objPtr); jim_wide wideValueB = JimWideValue(objPtr2); int cmpRes; switch (expr->token[2].type) { case JIM_EXPROP_LT: cmpRes = wideValueA < wideValueB; break; case JIM_EXPROP_LTE: cmpRes = wideValueA <= wideValueB; break; case JIM_EXPROP_GT: cmpRes = wideValueA > wideValueB; break; case JIM_EXPROP_GTE: cmpRes = wideValueA >= wideValueB; break; case JIM_EXPROP_NUMEQ: cmpRes = wideValueA == wideValueB; break; case JIM_EXPROP_NUMNE: cmpRes = wideValueA != wideValueB; break; default: goto noopt; } *exprResultPtrPtr = (cmpRes ? interp->trueObj : interp->falseObj); Jim_IncrRefCount(*exprResultPtrPtr); return JIM_OK; } } break; } } noopt: #endif // In order to avoid that the internal repr gets freed due to shimmering of the exprObjPtr's object, we make the internal rep shared. expr->inUse++; // The stack-based expr VM itself // Stack allocation. Expr programs have the feature that a program of length N can't require a stack longer than N. e.stack = (expr->len > JIM_EE_STATICSTACK_LEN ? 
(Jim_Obj **)Jim_Alloc(sizeof(Jim_Obj *) * expr->len) : staticStack); e.stacklen = 0; // Execute every instruction Jim_Obj *objPtr; for (i = 0; i < expr->len && retcode == JIM_OK; i++) switch (expr->token[i].type) { case JIM_TT_EXPR_INT: case JIM_TT_EXPR_DOUBLE: case JIM_TT_STR: ExprPush(&e, expr->token[i].objPtr); break; case JIM_TT_VAR: objPtr = Jim_GetVariable(interp, expr->token[i].objPtr, JIM_ERRMSG); if (objPtr) ExprPush(&e, objPtr); else retcode = JIM_ERROR; break; case JIM_TT_DICTSUGAR: objPtr = JimExpandDictSugar(interp, expr->token[i].objPtr); if (objPtr) ExprPush(&e, objPtr); else retcode = JIM_ERROR; break; case JIM_TT_ESC: retcode = Jim_SubstObj(interp, expr->token[i].objPtr, &objPtr, JIM_NONE); if (retcode == JIM_OK) ExprPush(&e, objPtr); break; case JIM_TT_CMD: retcode = Jim_EvalObj(interp, expr->token[i].objPtr); if (retcode == JIM_OK) ExprPush(&e, Jim_GetResult(interp)); break; default: { // Find and execute the operation e.skip = 0; e.opcode = expr->token[i].type; retcode = JimExprOperatorInfoByOpcode(e.opcode)->funcop(interp, &e); // Skip some opcodes if necessary i += e.skip; continue; } } expr->inUse--; if (retcode == JIM_OK) *exprResultPtrPtr = ExprPop(&e); else for (i = 0; i < e.stacklen; i++) Jim_DecrRefCount(interp, e.stack[i]); if (e.stack != staticStack) Jim_Free(e.stack); return retcode; } __device__ int Jim_GetBoolFromExpr(Jim_Interp *interp, Jim_Obj *exprObjPtr, int *boolPtr) { jim_wide wideValue; double doubleValue; Jim_Obj *exprResultPtr; int retcode = Jim_EvalExpression(interp, exprObjPtr, &exprResultPtr); if (retcode != JIM_OK) return retcode; if (JimGetWideNoErr(interp, exprResultPtr, &wideValue) != JIM_OK) { if (Jim_GetDouble(interp, exprResultPtr, &doubleValue) != JIM_OK) { Jim_DecrRefCount(interp, exprResultPtr); return JIM_ERROR; } else { Jim_DecrRefCount(interp, exprResultPtr); *boolPtr = doubleValue != 0; return JIM_OK; } } *boolPtr = wideValue != 0; Jim_DecrRefCount(interp, exprResultPtr); return JIM_OK; } #pragma endregion // ----------------------------------------------------------------------------- // ScanFormat String Object // ----------------------------------------------------------------------------- #pragma region ScanFormat String Object // This Jim_Obj will hold a parsed representation of a format string passed to the Jim_ScanString command. For error diagnostics, the scanformat string has // to be parsed in its entirety first and then, if correct, can be used for scanning. To avoid endless re-parsing, the parsed representation will be // stored in an internal representation and re-used for performance reasons. // A ScanFmtPartDescr will hold the information of /one/ part of the whole scanformat string. This part will later be used to extract information // out from the string to be parsed by Jim_ScanString typedef struct ScanFmtPartDescr { char *arg; // Specification of a CHARSET conversion char *prefix; // Prefix to be scanned literally before conversion size_t width; // Maximal width of input to be converted int pos; // -1 - no assign, 0 - natural pos, >0 - XPG3 pos char type; // Type of conversion (e.g. c, d, f) char modifier; // Modify type (e.g. l - long, h - short) } ScanFmtPartDescr; // The ScanFmtStringObj will hold the internal representation of a scanformat string parsed and separated into part descriptions. Furthermore it contains // the original string representation of the scanformat string to allow for fast update of the Jim_Obj's string representation part. 
// As an add-on the internal object representation adds some scratch pad area for usage by Jim_ScanString to avoid endless allocating and freeing of memory for the purpose of string scanning. // The error member points to a statically allocated string in case of a mal-formed scanformat string or it contains '0' (NULL) in case of a valid parse representation. // The whole memory of the internal representation is allocated as a single area of memory that will be internally separated. So freeing and duplicating of such an object is cheap typedef struct ScanFmtStringObj { jim_wide size; // Size of internal repr in bytes char *stringRep; // Original string representation size_t count; // Number of ScanFmtPartDescr contained size_t convCount; // Number of conversions that will assign size_t maxPos; // Max position index if XPG3 is used const char *error; // Ptr to error text (NULL if no error) char *scratch; // Some scratch pad used by Jim_ScanString ScanFmtPartDescr descr[1]; // The vector of partial descriptions } ScanFmtStringObj; static __device__ void FreeScanFmtInternalRep(Jim_Interp *interp, Jim_Obj *objPtr); static __device__ void DupScanFmtInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr); static __device__ void UpdateStringOfScanFmt(Jim_Obj *objPtr); __constant__ static const Jim_ObjType _scanFmtStringObjType = { "scanformatstring", FreeScanFmtInternalRep, DupScanFmtInternalRep, UpdateStringOfScanFmt, JIM_TYPE_NONE, }; __device__ void FreeScanFmtInternalRep(Jim_Interp *interp, Jim_Obj *objPtr) { JIM_NOTUSED(interp); Jim_Free((char *)objPtr->internalRep.ptr); objPtr->internalRep.ptr = 0; } __device__ void DupScanFmtInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr) { JIM_NOTUSED(interp); size_t size = (size_t)((ScanFmtStringObj *)srcPtr->internalRep.ptr)->size; ScanFmtStringObj *newVec = (ScanFmtStringObj *)Jim_Alloc((int)size); memcpy(newVec, srcPtr->internalRep.ptr, size); dupPtr->internalRep.ptr = newVec; dupPtr->typePtr = &_scanFmtStringObjType; } static __device__ void UpdateStringOfScanFmt(Jim_Obj *objPtr) { JimSetStringBytes(objPtr, ((ScanFmtStringObj *)objPtr->internalRep.ptr)->stringRep); } // SetScanFmtFromAny will parse a given string and create the internal representation of the format specification. In case of an error // the error data member of the internal representation will be set to a descriptive error text and the function will be left with // JIM_ERROR to indicate unsuccessful parsing (i.e. a 
malformed scanformat specification) static __device__ int SetScanFmtFromAny(Jim_Interp *interp, Jim_Obj *objPtr) { int maxCount, i, lastPos = -1; const char *fmt = objPtr->bytes; int maxFmtLen = objPtr->length; const char *fmtEnd = fmt + maxFmtLen; int curr; Jim_FreeIntRep(interp, objPtr); // Count how many conversions could take place maximally for (i = 0, maxCount = 0; i < maxFmtLen; ++i) if (fmt[i] == '%') ++maxCount; // Calculate an approximation of the memory necessary int approxSize = sizeof(ScanFmtStringObj) // Size of the container + (maxCount + 1) * sizeof(ScanFmtPartDescr) // Size of all partials + maxFmtLen * sizeof(char) + 3 + 1 // Scratch + "%n" + '\0' + maxFmtLen * sizeof(char) + 1 // Original stringrep + maxFmtLen * sizeof(char) // Arg for CHARSETs + (maxCount + 1) * sizeof(char) // '\0' for every partial + 1; // safety byte ScanFmtStringObj *fmtObj = (ScanFmtStringObj *)Jim_Alloc(approxSize); memset(fmtObj, 0, approxSize); fmtObj->size = approxSize; fmtObj->maxPos = 0; fmtObj->scratch = (char *)&fmtObj->descr[maxCount + 1]; fmtObj->stringRep = fmtObj->scratch + maxFmtLen + 3 + 1; memcpy(fmtObj->stringRep, fmt, maxFmtLen); char *buffer = fmtObj->stringRep + maxFmtLen + 1; objPtr->internalRep.ptr = fmtObj; objPtr->typePtr = &_scanFmtStringObjType; for (i = 0, curr = 0; fmt < fmtEnd; ++fmt) { int width = 0, skip; ScanFmtPartDescr *descr = &fmtObj->descr[curr]; fmtObj->count++; descr->width = 0; // Assume width unspecified // Overread and store any "literal" prefix if (*fmt != '%' || fmt[1] == '%') { descr->type = 0; descr->prefix = &buffer[i]; for (; fmt < fmtEnd; ++fmt) { if (*fmt == '%') { if (fmt[1] != '%') break; ++fmt; } buffer[i++] = *fmt; } buffer[i++] = 0; } // Skip the conversion introducing '%' sign ++fmt; // End reached due to non-conversion literal only? if (fmt >= fmtEnd) goto done; descr->pos = 0; // Assume "natural" positioning if (*fmt == '*') { descr->pos = -1; // Okay, conversion will not be assigned ++fmt; } else fmtObj->convCount++; // Otherwise count as assign-conversion // Check if next token is a number (could be width or pos) if (sscanf(fmt, "%d%n", &width, &skip) == 1) { fmt += skip; // Was the number an XPG3 position specifier? if (descr->pos != -1 && *fmt == '$') { int prev; ++fmt; descr->pos = width; width = 0; // Look if "natural" positioning and XPG3 one was mixed if ((lastPos == 0 && descr->pos > 0) || (lastPos > 0 && descr->pos == 0)) { fmtObj->error = "cannot mix \"%\" and \"%n$\" conversion specifiers"; return JIM_ERROR; } // Look if this position was already used for (prev = 0; prev < curr; ++prev) { if (fmtObj->descr[prev].pos == -1) continue; if (fmtObj->descr[prev].pos == descr->pos) { fmtObj->error = "variable is assigned by multiple \"%n$\" conversion specifiers"; return JIM_ERROR; } } // Try to find a width after the XPG3 specifier if (sscanf(fmt, "%d%n", &width, &skip) == 1) { descr->width = width; fmt += skip; } if (descr->pos > 0 && (size_t) descr->pos > fmtObj->maxPos) fmtObj->maxPos = descr->pos; } // Number was not an XPG3, so it has to be a width else descr->width = width; } // If the positioning mode was still undetermined, fix it now if (lastPos == -1) lastPos = descr->pos; // Handle CHARSET conversion type ... 
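// Illustrative examples of the CHARSET syntax handled below (added comment, not in the original
// source): "%[a-z]" scans a run of lowercase letters, "%[^,]" scans everything up to the next comma,
// and "%[]x]" matches ']' or 'x', because a ']' placed directly after '[' (or after '^') is taken literally.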
if (*fmt == '[') { int swapped = 1, beg = i, end, j; descr->type = '['; descr->arg = &buffer[i]; ++fmt; if (*fmt == '^') buffer[i++] = *fmt++; if (*fmt == ']') buffer[i++] = *fmt++; while (*fmt && *fmt != ']') buffer[i++] = *fmt++; if (*fmt != ']') { fmtObj->error = "unmatched [ in format string"; return JIM_ERROR; } end = i; buffer[i++] = 0; // In case a range fence was given "backwards", swap it while (swapped) { swapped = 0; for (j = beg + 1; j < end - 1; ++j) if (buffer[j] == '-' && buffer[j - 1] > buffer[j + 1]) { char tmp = buffer[j - 1]; buffer[j - 1] = buffer[j + 1]; buffer[j + 1] = tmp; swapped = 1; } } } else { // Remember any valid modifier if given if (strchr("hlL", *fmt) != 0) descr->modifier = _tolower((int)*fmt++); descr->type = *fmt; if (!strchr("efgcsndoxui", *fmt)) { fmtObj->error = "bad scan conversion character"; return JIM_ERROR; } else if (*fmt == 'c' && descr->width != 0) { fmtObj->error = "field width may not be specified in %c " "conversion"; return JIM_ERROR; } else if (*fmt == 'u' && descr->modifier == 'l') { fmtObj->error = "unsigned wide not supported"; return JIM_ERROR; } } curr++; } done: return JIM_OK; } // Some accessor macros to allow low-level access to fields of the internal repr #define FormatGetCnvCount(_fo_) ((ScanFmtStringObj*)((_fo_)->internalRep.ptr))->convCount #define FormatGetMaxPos(_fo_) ((ScanFmtStringObj*)((_fo_)->internalRep.ptr))->maxPos #define FormatGetError(_fo_) ((ScanFmtStringObj*)((_fo_)->internalRep.ptr))->error // JimScanAString is used to scan an unspecified string that ends at the next WS, or a string that is specified via a charset. static __device__ Jim_Obj *JimScanAString(Jim_Interp *interp, const char *sdescr, const char *str) { char *buffer = Jim_StrDup(str); char *p = buffer; while (*str) { int c; int n; if (!sdescr && isspace(*str)) break; // EOS via WS if unspecified n = utf8_tounicode(str, &c); if (sdescr && !JimCharsetMatch(sdescr, c, JIM_CHARSET_SCAN)) break; while (n--) *p++ = *str++; } *p = 0; return Jim_NewStringObjNoAlloc(interp, buffer, (int)(p - buffer)); } // ScanOneEntry will scan one entry out of the string passed as argument. It uses the sscanf() function for this task. 
After extracting and // converting the value, the count of scanned characters will be returned, or -1 in case no conversion took place and the string was already scanned through static __device__ int ScanOneEntry(Jim_Interp *interp, const char *str, int pos, int strLen, ScanFmtStringObj * fmtObj, long idx, Jim_Obj **valObjPtr) { const char *tok; const ScanFmtPartDescr *descr = &fmtObj->descr[idx]; size_t scanned = 0; size_t anchor = pos; int i; Jim_Obj *tmpObj = NULL; // First pessimistically assume we will not scan anything :-) *valObjPtr = 0; if (descr->prefix) { // There was a prefix given before the conversion, skip it and adjust the string-to-be-parsed accordingly for (i = 0; pos < strLen && descr->prefix[i]; ++i) { // Whitespace in the prefix matches any run of WS if (isspace(descr->prefix[i])) while (pos < strLen && isspace(str[pos])) ++pos; // Prefix does not match here, leave the loop else if (descr->prefix[i] != str[pos]) break; // Prefix matched so far, next round else ++pos; } // All of str consumed: EOF condition if (pos >= strLen) return -1; // Not whole prefix consumed, no conversion possible else if (descr->prefix[i] != 0) return 0; } // For all but the following conversions ('c', '[', 'n'), skip leading WS if (descr->type != 'c' && descr->type != '[' && descr->type != 'n') while (isspace(str[pos])) ++pos; // Determine how much skipped/scanned so far scanned = pos - anchor; // %c is a special, simple case: no width // The 'n' pseudo conversion returns how much has been scanned so far if (descr->type == 'n') *valObjPtr = Jim_NewIntObj(interp, anchor + scanned); // Cannot scan anything, as str is totally consumed else if (pos >= strLen) return -1; else if (descr->type == 'c') { int c; scanned += utf8_tounicode(&str[pos], &c); *valObjPtr = Jim_NewIntObj(interp, c); return (int)scanned; } else { // Processing of conversions follows ... if (descr->width > 0) { // Do not try to scan as far as possible but only the given width. To ensure this, we copy the part that should be scanned. size_t sLen = utf8_strlen(&str[pos], strLen - pos); size_t tLen = descr->width > sLen ? sLen : descr->width; tmpObj = Jim_NewStringObjUtf8(interp, str + pos, (int)tLen); tok = tmpObj->bytes; } // As no width was given, simply refer to the original string else tok = &str[pos]; switch (descr->type) { case 'd': case 'o': case 'x': case 'u': case 'i': { char *endp; // Position where the number finished int base = (descr->type == 'o' ? 8 : descr->type == 'x' ? 16 : descr->type == 'i' ? 0 : 10); // Try to scan a number with the given base jim_wide w = (base == 0 ? jim_strtoull(tok, &endp) : strtoull(tok, &endp, base)); if (endp != tok) { // There was some number successfully scanned! *valObjPtr = Jim_NewIntObj(interp, w); // Adjust the number-of-chars scanned so far scanned += endp - tok; } // Nothing was scanned. We have to determine if this happened due to e.g. prefix mismatch or input str exhausted else scanned = (*tok ? 0 : -1); break; } case 's': case '[':{ *valObjPtr = JimScanAString(interp, descr->arg, tok); scanned += Jim_Length(*valObjPtr); break; } case 'e': case 'f': case 'g': { char *endp; double value = strtod(tok, &endp); if (endp != tok) { // There was some number successfully scanned! *valObjPtr = Jim_NewDoubleObj(interp, value); // Adjust the number-of-chars scanned so far scanned += endp - tok; } // Nothing was scanned. We have to determine if this happened due to e.g. prefix mismatch or input str exhausted else scanned = (*tok ? 
0 : -1); break; } } // If a substring was allocated (due to pre-defined width) do not forget to free it if (tmpObj) Jim_FreeNewObj(interp, tmpObj); } return (int)scanned; } // Jim_ScanString is the workhorse of string scanning. It will scan a given string and returns all converted (and not ignored) values in a list back // to the caller. If an error occured, a NULL pointer will be returned __device__ Jim_Obj *Jim_ScanString(Jim_Interp *interp, Jim_Obj *strObjPtr, Jim_Obj *fmtObjPtr, int flags) { int scanned = 1; const char *str = Jim_String(strObjPtr); int strLen = Jim_Utf8Length(interp, strObjPtr); Jim_Obj *resultList = 0; Jim_Obj *emptyStr = 0; // This should never happen. The format object should already be of the correct type JimPanic(fmtObjPtr->typePtr != &_scanFmtStringObjType, "Jim_ScanString() for non-scan format"); ScanFmtStringObj *fmtObj = (ScanFmtStringObj *)fmtObjPtr->internalRep.ptr; // Check if format specification was valid if (fmtObj->error != 0) { if (flags & JIM_ERRMSG) Jim_SetResultString(interp, fmtObj->error, -1); return 0; } // Allocate a new "shared" empty string for all unassigned conversions emptyStr = Jim_NewEmptyStringObj(interp); Jim_IncrRefCount(emptyStr); // Create a list and fill it with empty strings up to max specified XPG3 resultList = Jim_NewListObj(interp, NULL, 0); int resultc; Jim_Obj **resultVec = 0; size_t i, pos; if (fmtObj->maxPos > 0) { for (i = 0; i < fmtObj->maxPos; ++i) Jim_ListAppendElement(interp, resultList, emptyStr); JimListGetElements(interp, resultList, &resultc, &resultVec); } // Now handle every partial format description for (i = 0, pos = 0; i < fmtObj->count; ++i) { ScanFmtPartDescr *descr = &(fmtObj->descr[i]); Jim_Obj *value = 0; // Only last type may be "literal" w/o conversion - skip it! if (descr->type == 0) continue; // As long as any conversion could be done, we will proceed if (scanned > 0) scanned = ScanOneEntry(interp, str, (int)pos, strLen, fmtObj, (long)i, &value); // In case our first try results in EOF, we will leave if (scanned == -1 && i == 0) goto eof; // Advance next pos-to-be-scanned for the amount scanned already pos += scanned; // value == 0 means no conversion took place so take empty string if (value == 0) value = Jim_NewEmptyStringObj(interp); // If value is a non-assignable one, skip it if (descr->pos == -1) Jim_FreeNewObj(interp, value); // Otherwise append it to the result list if no XPG3 was given else if (descr->pos == 0) Jim_ListAppendElement(interp, resultList, value); else if (resultVec[descr->pos - 1] == emptyStr) { // But due to given XPG3, put the value into the corr. 
slot Jim_DecrRefCount(interp, resultVec[descr->pos - 1]); Jim_IncrRefCount(value); resultVec[descr->pos - 1] = value; } else { // Otherwise, the slot was already used - free obj and ERROR Jim_FreeNewObj(interp, value); goto err; } } Jim_DecrRefCount(interp, emptyStr); return resultList; eof: Jim_DecrRefCount(interp, emptyStr); Jim_FreeNewObj(interp, resultList); return (Jim_Obj *)EOF; err: Jim_DecrRefCount(interp, emptyStr); Jim_FreeNewObj(interp, resultList); return 0; } #pragma endregion // ----------------------------------------------------------------------------- // Pseudo Random Number Generation // ----------------------------------------------------------------------------- #pragma region Pseudo Random Number Generation // Initialize the sbox with the numbers from 0 to 255 static __device__ void JimPrngInit(Jim_Interp *interp) { #define PRNG_SEED_SIZE 256 time_t t = time(NULL); interp->prngState = (Jim_PrngState *)Jim_Alloc(sizeof(Jim_PrngState)); unsigned int *seed = (unsigned int *)Jim_Alloc(PRNG_SEED_SIZE * sizeof(*seed)); for (int i = 0; i < PRNG_SEED_SIZE; i++) seed[i] = (unsigned int)(rand() ^ t ^ clock()); JimPrngSeed(interp, (unsigned char *)seed, PRNG_SEED_SIZE * sizeof(*seed)); Jim_Free(seed); } // Generates N bytes of random data static __device__ void JimRandomBytes(Jim_Interp *interp, void *dest, unsigned int len) { unsigned char *destByte = (unsigned char *)dest; unsigned int si, sj, x; // initialization, only needed the first time if (interp->prngState == NULL) JimPrngInit(interp); Jim_PrngState *prng = interp->prngState; // generates 'len' bytes of pseudo-random numbers for (x = 0; x < len; x++) { prng->i = (prng->i + 1) & 0xff; si = prng->sbox[prng->i]; prng->j = (prng->j + si) & 0xff; sj = prng->sbox[prng->j]; prng->sbox[prng->i] = sj; prng->sbox[prng->j] = si; *destByte++ = prng->sbox[(si + sj) & 0xff]; } } // Re-seed the generator with user-provided bytes static __device__ void JimPrngSeed(Jim_Interp *interp, unsigned char *seed, int seedLen) { int i; // initialization, only needed the first time if (interp->prngState == NULL) JimPrngInit(interp); Jim_PrngState *prng = interp->prngState; // Set the sbox[i] with i for (i = 0; i < 256; i++) prng->sbox[i] = i; // Now use the seed to perform a random permutation of the sbox for (i = 0; i < seedLen; i++) { unsigned char t = prng->sbox[i & 0xFF]; prng->sbox[i & 0xFF] = prng->sbox[seed[i]]; prng->sbox[seed[i]] = t; } prng->i = prng->j = 0; // discard at least the first 256 bytes of stream. 
borrow the seed buffer for this for (i = 0; i < 256; i += seedLen) JimRandomBytes(interp, seed, seedLen); } // [incr] static __device__ int Jim_IncrCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { jim_wide wideValue, increment = 1; if (argc != 2 && argc != 3) { Jim_WrongNumArgs(interp, 1, argv, "varName ?increment?"); return JIM_ERROR; } if (argc == 3) if (Jim_GetWide(interp, argv[2], &increment) != JIM_OK) return JIM_ERROR; Jim_Obj *intObjPtr = Jim_GetVariable(interp, argv[1], JIM_UNSHARED); // Set missing variable to 0 if (!intObjPtr) wideValue = 0; else if (Jim_GetWide(interp, intObjPtr, &wideValue) != JIM_OK) return JIM_ERROR; if (!intObjPtr || Jim_IsShared(intObjPtr)) { intObjPtr = Jim_NewIntObj(interp, wideValue + increment); if (Jim_SetVariable(interp, argv[1], intObjPtr) != JIM_OK) { Jim_FreeNewObj(interp, intObjPtr); return JIM_ERROR; } } else { // Can do it the quick way Jim_InvalidateStringRep(intObjPtr); JimWideValue(intObjPtr) = wideValue + increment; // The following step is required in order to invalidate the string repr of "FOO" if the var name is on the form of "FOO(IDX)" if (argv[1]->typePtr != &_variableObjType) Jim_SetVariable(interp, argv[1], intObjPtr); // Note that this can't fail since GetVariable already succeeded } Jim_SetResult(interp, intObjPtr); return JIM_OK; } #pragma endregion // ----------------------------------------------------------------------------- // Eval // ----------------------------------------------------------------------------- #pragma region Eval #define JIM_EVAL_SARGV_LEN 8 // static arguments vector length #define JIM_EVAL_SINTV_LEN 8 // static interpolation vector length // Handle calls to the [unknown] command static __device__ int JimUnknown(Jim_Interp *interp, int argc, Jim_Obj *const *argv) { // If JimUnknown() is recursively called too many times... done here if (interp->unknown_called > 50) return JIM_ERROR; // The object interp->unknown just contains the "unknown" string, it is used in order to avoid to lookup the unknown command every time but instead to cache the result. // If the [unknown] command does not exist ... if (Jim_GetCommand(interp, interp->unknown, JIM_NONE) == NULL) return JIM_ERROR; interp->unknown_called++; // XXX: Are we losing fileNameObj and linenr? int retcode = Jim_EvalObjPrefix(interp, interp->unknown, argc, argv); interp->unknown_called--; return retcode; } static __device__ int JimInvokeCommand(Jim_Interp *interp, int objc, Jim_Obj *const *objv) { #if 0 printf("invoke"); for (int j = 0; j < objc; j++) printf(" '%s'", Jim_String(objv[j])); printf("\n"); #endif int retcode; Jim_Cmd *cmdPtr; if (interp->framePtr->tailcallCmd) { // Special tailcall command was pre-resolved cmdPtr = interp->framePtr->tailcallCmd; interp->framePtr->tailcallCmd = NULL; } else { cmdPtr = Jim_GetCommand(interp, objv[0], JIM_ERRMSG); if (cmdPtr == NULL) return JimUnknown(interp, objc, objv); JimIncrCmdRefCount(cmdPtr); } if (interp->evalDepth == interp->maxEvalDepth) { Jim_SetResultString(interp, "Infinite eval recursion", -1); retcode = JIM_ERROR; goto out; } interp->evalDepth++; // Call it -- Make sure result is an empty object. 
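    // (Illustrative note, not from the original comments: a native command such
    // as [puts] is dispatched through u.native.cmdProc below, while a Tcl proc
    // body goes through JimCallProcedure further down in this file.)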
Jim_ResetResult(interp); if (cmdPtr->isproc) retcode = JimCallProcedure(interp, cmdPtr, objc, objv); else { ClientData clientData = interp->cmdPrivData = cmdPtr->u.native.privData; retcode = cmdPtr->u.native.cmdProc(clientData, interp, objc, objv); } interp->evalDepth--; out: JimDecrCmdRefCount(interp, cmdPtr); return retcode; } // Eval the object vector 'objv' composed of 'objc' elements. Every element is used as single argument. // Jim_EvalObj() will call this function every time its object argument is of "list" type, with no string representation. // // This is possible because the string representation of a list object generated by the UpdateStringOfList is made // in a way that ensures that every list element is a different command argument. __device__ int Jim_EvalObjVector(Jim_Interp *interp, int objc, Jim_Obj *const *objv) { // Incr refcount of arguments int i; for (i = 0; i < objc; i++) Jim_IncrRefCount(objv[i]); int retcode = JimInvokeCommand(interp, objc, objv); // Decr refcount of arguments and return the retcode for (i = 0; i < objc; i++) Jim_DecrRefCount(interp, objv[i]); return retcode; } // Invokes 'prefix' as a command with the objv array as arguments. __device__ int Jim_EvalObjPrefix(Jim_Interp *interp, Jim_Obj *prefix, int objc, Jim_Obj *const *objv) { Jim_Obj **nargv = (Jim_Obj **)Jim_Alloc((objc + 1) * sizeof(*nargv)); nargv[0] = prefix; memcpy(&nargv[1], &objv[0], sizeof(nargv[0]) * objc); int ret = Jim_EvalObjVector(interp, objc + 1, nargv); Jim_Free(nargv); return ret; } static __device__ void JimAddErrorToStack(Jim_Interp *interp, ScriptObj *script) { if (!interp->errorFlag) { // This is the first error, so save the file/line information and reset the stack interp->errorFlag = 1; Jim_IncrRefCount(script->fileNameObj); Jim_DecrRefCount(interp, interp->errorFileNameObj); interp->errorFileNameObj = script->fileNameObj; interp->errorLine = script->linenr; JimResetStackTrace(interp); // Always add a level where the error first occurs interp->addStackTrace++; } // Now if this is an "interesting" level, add it to the stack trace if (interp->addStackTrace > 0) { // Add the stack info for the current level JimAppendStackTrace(interp, Jim_String(interp->errorProc), script->fileNameObj, script->linenr); // Note: if we didn't have a filename for this level, don't clear the addStackTrace flag so we can pick it up at the next level if (Jim_Length(script->fileNameObj)) interp->addStackTrace = 0; Jim_DecrRefCount(interp, interp->errorProc); interp->errorProc = interp->emptyObj; Jim_IncrRefCount(interp->errorProc); } } static __device__ int JimSubstOneToken(Jim_Interp *interp, const ScriptToken *token, Jim_Obj **objPtrPtr) { Jim_Obj *objPtr; switch (token->type) { case JIM_TT_STR: case JIM_TT_ESC: objPtr = token->objPtr; break; case JIM_TT_VAR: objPtr = Jim_GetVariable(interp, token->objPtr, JIM_ERRMSG); break; case JIM_TT_DICTSUGAR: objPtr = JimExpandDictSugar(interp, token->objPtr); break; case JIM_TT_EXPRSUGAR: objPtr = JimExpandExprSugar(interp, token->objPtr); break; case JIM_TT_CMD: switch (Jim_EvalObj(interp, token->objPtr)) { case JIM_OK: case JIM_RETURN: objPtr = interp->result; break; case JIM_BREAK: return JIM_BREAK; // Stop substituting case JIM_CONTINUE: return JIM_CONTINUE; // just skip this one default: return JIM_ERROR; } break; default: JimPanic(1, "default token type (%d) reached " "in Jim_SubstObj().", token->type); objPtr = NULL; break; } if (objPtr) { *objPtrPtr = objPtr; return JIM_OK; } return JIM_ERROR; } // Interpolate the given tokens into a unique Jim_Obj 
// returned by reference via *objPtrPtr. This function is only called by Jim_EvalObj() and Jim_SubstObj(). The returned object has refcount = 0.
static __device__ Jim_Obj *JimInterpolateTokens(Jim_Interp *interp, const ScriptToken * token, int tokens, int flags)
{
    int totlen = 0, i;
    Jim_Obj *sintv[JIM_EVAL_SINTV_LEN];
    Jim_Obj **intv = (tokens <= JIM_EVAL_SINTV_LEN ? sintv : (Jim_Obj **)Jim_Alloc(sizeof(Jim_Obj *) * tokens));
    // Compute every token forming the argument in the intv objects vector.
    for (i = 0; i < tokens; i++) {
        switch (JimSubstOneToken(interp, &token[i], &intv[i])) {
        case JIM_OK:
        case JIM_RETURN:
            break;
        case JIM_BREAK:
            if (flags & JIM_SUBST_FLAG) {
                // Stop here
                tokens = i;
                continue;
            }
            // XXX: Should probably set an error about break outside loop
            // fall through to error
        case JIM_CONTINUE:
            if (flags & JIM_SUBST_FLAG) {
                intv[i] = NULL;
                continue;
            }
            // XXX: Ditto continue outside loop
            // fall through to error
        default:
            while (i--) Jim_DecrRefCount(interp, intv[i]);
            if (intv != sintv) Jim_Free(intv);
            return NULL;
        }
        Jim_IncrRefCount(intv[i]);
        Jim_String(intv[i]);
        totlen += intv[i]->length;
    }
    // Fast path return for a single token
    if (tokens == 1 && intv[0] && intv == sintv) {
        Jim_DecrRefCount(interp, intv[0]);
        return intv[0];
    }
    // Concatenate every token into a unique object.
    Jim_Obj *objPtr = Jim_NewStringObjNoAlloc(interp, NULL, 0);
    if (tokens == 4 && token[0].type == JIM_TT_ESC && token[1].type == JIM_TT_ESC && token[2].type == JIM_TT_VAR) {
        // May be able to do a fast interpolated object -> dictSubst
        objPtr->typePtr = &_interpolatedObjType;
        objPtr->internalRep.dictSubstValue.varNameObjPtr = token[0].objPtr;
        objPtr->internalRep.dictSubstValue.indexObjPtr = intv[2];
        Jim_IncrRefCount(intv[2]);
    }
    // If the first interpolated token is the source, preserve the source info
    else if (tokens && intv[0] && intv[0]->typePtr == &_sourceObjType)
        JimSetSourceInfo(interp, objPtr, intv[0]->internalRep.sourceValue.fileNameObj, intv[0]->internalRep.sourceValue.lineNumber);
    char *s = objPtr->bytes = (char *)Jim_Alloc(totlen + 1);
    objPtr->length = totlen;
    for (i = 0; i < tokens; i++) {
        if (intv[i]) {
            memcpy(s, intv[i]->bytes, intv[i]->length);
            s += intv[i]->length;
            Jim_DecrRefCount(interp, intv[i]);
        }
    }
    objPtr->bytes[totlen] = '\0';
    // Free the intv vector if not static.
    if (intv != sintv) Jim_Free(intv);
    return objPtr;
}

// listPtr *must* be a list. The contents of the list are evaluated with the first element as the command and the remaining elements as the arguments.
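// A minimal sketch of the fast path this enables (illustrative only; cmdObj
// and argObj are hypothetical objects):
//   Jim_Obj *objv[2] = { cmdObj, argObj };
//   Jim_Obj *listObj = Jim_NewListObj(interp, objv, 2); // pure list, no string rep yet
//   JimEvalObjList(interp, listObj); // dispatches objv[0] as the command, no reparsing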
static __device__ int JimEvalObjList(Jim_Interp *interp, Jim_Obj *listPtr)
{
    int retcode = JIM_OK;
    JimPanic(Jim_IsList(listPtr) == 0, "JimEvalObjList() invoked on non-list.");
    if (listPtr->internalRep.listValue.len) {
        Jim_IncrRefCount(listPtr);
        retcode = JimInvokeCommand(interp, listPtr->internalRep.listValue.len, listPtr->internalRep.listValue.ele);
        Jim_DecrRefCount(interp, listPtr);
    }
    return retcode;
}

__device__ int Jim_EvalObjList(Jim_Interp *interp, Jim_Obj *listPtr)
{
    SetListFromAny(interp, listPtr);
    return JimEvalObjList(interp, listPtr);
}

__device__ int Jim_EvalObj(Jim_Interp *interp, Jim_Obj *scriptObjPtr)
{
    int i;
    ScriptToken *token;
    int retcode = JIM_OK;
    Jim_Obj *sargv[JIM_EVAL_SARGV_LEN], **argv = NULL;
    Jim_Obj *prevScriptObj;
    // If the object is of type "list" with no string rep, we can call a specialized version of Jim_EvalObj()
    if (Jim_IsList(scriptObjPtr) && scriptObjPtr->bytes == NULL) return JimEvalObjList(interp, scriptObjPtr);
    Jim_IncrRefCount(scriptObjPtr); // Make sure it's shared
    ScriptObj *script = JimGetScript(interp, scriptObjPtr);
    if (!JimScriptValid(interp, script)) {
        Jim_DecrRefCount(interp, scriptObjPtr);
        return JIM_ERROR;
    }
    // Reset the interpreter result. This is useful to return the empty result in the case of an empty program.
    Jim_ResetResult(interp);
    token = script->token;
#ifdef JIM_OPTIMIZATION
    // Check for one of the following common scripts used by for, while:
    //   {}
    //   incr a
    if (script->len == 0) {
        Jim_DecrRefCount(interp, scriptObjPtr);
        return JIM_OK;
    }
    if (script->len == 3 && token[1].objPtr->typePtr == &_commandObjType && token[1].objPtr->internalRep.cmdValue.cmdPtr->isproc == 0 && token[1].objPtr->internalRep.cmdValue.cmdPtr->u.native.cmdProc == Jim_IncrCoreCommand && token[2].objPtr->typePtr == &_variableObjType) {
        Jim_Obj *objPtr = Jim_GetVariable(interp, token[2].objPtr, JIM_NONE);
        if (objPtr && !Jim_IsShared(objPtr) && objPtr->typePtr == &_intObjType) {
            JimWideValue(objPtr)++;
            Jim_InvalidateStringRep(objPtr);
            Jim_DecrRefCount(interp, scriptObjPtr);
            Jim_SetResult(interp, objPtr);
            return JIM_OK;
        }
    }
#endif
    // Now we have to make sure the internal repr will not be freed on shimmering.
    // Think for example of this:
    //   set x {llength $x; ... some more code ...}; eval $x
    // In order to preserve the internal rep, we increment the inUse field of the script internal rep structure.
    script->inUse++;
    // Stash the current script
    prevScriptObj = interp->currentScriptObj;
    interp->currentScriptObj = scriptObjPtr;
    interp->errorFlag = 0;
    argv = sargv;
    // Execute every command sequentially until the end of the script or an error occurs.
    for (i = 0; i < script->len && retcode == JIM_OK; ) {
        // The first token of the line is always JIM_TT_LINE
        int argc = token[i].objPtr->internalRep.scriptLineValue.argc;
        script->linenr = token[i].objPtr->internalRep.scriptLineValue.line;
        // Allocate the arguments vector if required
        if (argc > JIM_EVAL_SARGV_LEN) argv = (Jim_Obj **)Jim_Alloc(sizeof(Jim_Obj *) * argc);
        // Skip the JIM_TT_LINE token
        i++;
        // Populate the argument objects. If an error occurs, retcode will be set and 'j' will be set to the number of args expanded
        int j;
        for (j = 0; j < argc; j++) {
            long wordtokens = 1;
            int expand = 0;
            Jim_Obj *wordObjPtr = NULL;
            if (token[i].type == JIM_TT_WORD) {
                wordtokens = (long)JimWideValue(token[i++].objPtr);
                if (wordtokens < 0) { expand = 1; wordtokens = -wordtokens; }
            }
            // Fast path if the token does not need interpolation
            if (wordtokens == 1)
                switch (token[i].type) {
                case JIM_TT_ESC:
                case JIM_TT_STR: wordObjPtr = token[i].objPtr; break;
                case JIM_TT_VAR: wordObjPtr = Jim_GetVariable(interp, token[i].objPtr, JIM_ERRMSG); break;
                case JIM_TT_EXPRSUGAR: wordObjPtr = JimExpandExprSugar(interp, token[i].objPtr); break;
                case JIM_TT_DICTSUGAR: wordObjPtr = JimExpandDictSugar(interp, token[i].objPtr); break;
                case JIM_TT_CMD:
                    retcode = Jim_EvalObj(interp, token[i].objPtr);
                    if (retcode == JIM_OK) wordObjPtr = Jim_GetResult(interp);
                    break;
                default: JimPanic(1, "default token type reached in Jim_EvalObj().");
                }
            // For interpolation we call a helper function to do the work for us.
            else wordObjPtr = JimInterpolateTokens(interp, token + i, wordtokens, JIM_NONE);
            if (!wordObjPtr) {
                if (retcode == JIM_OK) retcode = JIM_ERROR;
                break;
            }
            Jim_IncrRefCount(wordObjPtr);
            i += wordtokens;
            if (!expand) argv[j] = wordObjPtr;
            else {
                // Need to expand wordObjPtr into multiple args from argv[j] ...
                int len = Jim_ListLength(interp, wordObjPtr);
                int newargc = argc + len - 1;
                int k;
                if (len > 1) {
                    if (argv == sargv) {
                        if (newargc > JIM_EVAL_SARGV_LEN) {
                            argv = (Jim_Obj **)Jim_Alloc(sizeof(*argv) * newargc);
                            memcpy(argv, sargv, sizeof(*argv) * j);
                        }
                    }
                    // Need to realloc to make room for (len - 1) more entries
                    else argv = (Jim_Obj **)Jim_Realloc(argv, sizeof(*argv) * newargc);
                }
                // Now copy in the expanded version
                for (k = 0; k < len; k++) {
                    argv[j++] = wordObjPtr->internalRep.listValue.ele[k];
                    Jim_IncrRefCount(wordObjPtr->internalRep.listValue.ele[k]);
                }
                // The original object reference is no longer needed: after the expansion it is no longer present on the argument vector, but the single elements are in its place.
                Jim_DecrRefCount(interp, wordObjPtr);
                // And update the indexes
                j--;
                argc += len - 1;
            }
        }
        if (retcode == JIM_OK && argc) {
            // Invoke the command
            retcode = JimInvokeCommand(interp, argc, argv);
            // Check for a signal after each command
            if (Jim_CheckSignal(interp)) retcode = JIM_SIGNAL;
        }
        // Finished with the command, so decrement the ref counts of each argument
        while (j-- > 0) Jim_DecrRefCount(interp, argv[j]);
        if (argv != sargv) {
            Jim_Free(argv);
            argv = sargv;
        }
    }
    // Possibly add to the error stack trace
    if (retcode == JIM_ERROR) JimAddErrorToStack(interp, script);
    // Propagate the addStackTrace value through 'return -code error'
    // No need to add a stack trace otherwise
    else if (retcode != JIM_RETURN || interp->returnCode != JIM_ERROR) interp->addStackTrace = 0;
    // Restore the current script
    interp->currentScriptObj = prevScriptObj;
    // Note that we don't have to decrement inUse, because the following code transfers our use of the reference again to the script object.
    Jim_FreeIntRep(interp, scriptObjPtr);
    scriptObjPtr->typePtr = &_scriptObjType;
    Jim_SetIntRepPtr(scriptObjPtr, script);
    Jim_DecrRefCount(interp, scriptObjPtr);
    return retcode;
}

static __device__ int JimSetProcArg(Jim_Interp *interp, Jim_Obj *argNameObj, Jim_Obj *argValObj)
{
    int retcode;
    // If argNameObj begins with '&', do an automatic upvar
    const char *varname = Jim_String(argNameObj);
    if (*varname == '&') {
        // First check that the target variable exists
        Jim_Obj *objPtr;
        Jim_CallFrame *savedCallFrame = interp->framePtr;
        interp->framePtr = interp->framePtr->parent;
        objPtr = Jim_GetVariable(interp, argValObj, JIM_ERRMSG);
        interp->framePtr = savedCallFrame;
        if (!objPtr) return JIM_ERROR;
        // It exists, so perform the binding.
        objPtr = Jim_NewStringObj(interp, varname + 1, -1);
        Jim_IncrRefCount(objPtr);
        retcode = Jim_SetVariableLink(interp, objPtr, argValObj, interp->framePtr->parent);
        Jim_DecrRefCount(interp, objPtr);
    }
    else retcode = Jim_SetVariable(interp, argNameObj, argValObj);
    return retcode;
}

// Sets the interp result to be an error message indicating the required proc args.
static __device__ void JimSetProcWrongArgs(Jim_Interp *interp, Jim_Obj *procNameObj, Jim_Cmd *cmd)
{
    // Create a nice error message, consistent with Tcl 8.5
    Jim_Obj *argmsg = Jim_NewStringObj(interp, "", 0);
    for (int i = 0; i < cmd->u.proc.argListLen; i++) {
        Jim_AppendString(interp, argmsg, " ", 1);
        if (i == cmd->u.proc.argsPos) {
            if (cmd->u.proc.arglist[i].defaultObjPtr) {
                // Renamed args
                Jim_AppendString(interp, argmsg, "?", 1);
                Jim_AppendObj(interp, argmsg, cmd->u.proc.arglist[i].defaultObjPtr);
                Jim_AppendString(interp, argmsg, " ...?", -1);
            }
            // We have plain args
            else Jim_AppendString(interp, argmsg, "?arg...?", -1);
        }
        else {
            if (cmd->u.proc.arglist[i].defaultObjPtr) {
                Jim_AppendString(interp, argmsg, "?", 1);
                Jim_AppendObj(interp, argmsg, cmd->u.proc.arglist[i].nameObjPtr);
                Jim_AppendString(interp, argmsg, "?", 1);
            }
            else {
                const char *arg = Jim_String(cmd->u.proc.arglist[i].nameObjPtr);
                if (*arg == '&') arg++;
                Jim_AppendString(interp, argmsg, arg, -1);
            }
        }
    }
    Jim_SetResultFormatted(interp, "wrong # args: should be \"%#s%#s\"", procNameObj, argmsg);
    Jim_FreeNewObj(interp, argmsg);
}

#ifdef jim_ext_namespace
// [namespace eval]
__device__ int Jim_EvalNamespace(Jim_Interp *interp, Jim_Obj *scriptObj, Jim_Obj *nsObj)
{
    // Create a new callframe
    Jim_CallFrame *callFramePtr = JimCreateCallFrame(interp, interp->framePtr, nsObj);
    callFramePtr->argv = &interp->emptyObj;
    callFramePtr->argc = 0;
    callFramePtr->procArgsObjPtr = NULL;
    callFramePtr->procBodyObjPtr = scriptObj;
    callFramePtr->staticVars = NULL;
    callFramePtr->fileNameObj = interp->emptyObj;
    callFramePtr->line = 0;
    Jim_IncrRefCount(scriptObj);
    interp->framePtr = callFramePtr;
    // Check if there are too many nested calls
    int retcode;
    if (interp->framePtr->level == interp->maxCallFrameDepth) {
        Jim_SetResultString(interp, "Too many nested calls. Infinite recursion?", -1);
        retcode = JIM_ERROR;
    }
    // Eval the body
    else retcode = Jim_EvalObj(interp, scriptObj);
    // Destroy the callframe
    interp->framePtr = interp->framePtr->parent;
    JimFreeCallFrame(interp, callFramePtr, JIM_FCF_REUSE);
    return retcode;
}
#endif

// Call a procedure implemented in Tcl. It's possible to speed this function up a lot: currently the callframes are not cached, but allocated and
// destroyed every time. What is especially costly is to create/destroy the local vars hash table every time.
// This can be fixed by just implementing callframe caching in JimCreateCallFrame() and JimFreeCallFrame().
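// Illustrative Tcl-level view of the arity bookkeeping used below (assumed
// semantics, mirroring the reqArity/optArity/argsPos fields):
//   proc p {a {b 2} args} {...}  ;# reqArity=1, optArity=1, argsPos=2
//   p 1                          ;# a=1, b=2 (default), $args = {}
//   p 1 5 6 7                    ;# a=1, b=5, $args = {6 7}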
static __device__ int JimCallProcedure(Jim_Interp *interp, Jim_Cmd *cmd, int argc, Jim_Obj *const *argv)
{
    // Check arity
    if (argc - 1 < cmd->u.proc.reqArity || (cmd->u.proc.argsPos < 0 && argc - 1 > cmd->u.proc.reqArity + cmd->u.proc.optArity)) {
        JimSetProcWrongArgs(interp, argv[0], cmd);
        return JIM_ERROR;
    }
    // Optimise for a procedure with no body - useful for optional debugging
    if (Jim_Length(cmd->u.proc.bodyObjPtr) == 0) return JIM_OK;
    // Check if there are too many nested calls
    if (interp->framePtr->level == interp->maxCallFrameDepth) {
        Jim_SetResultString(interp, "Too many nested calls. Infinite recursion?", -1);
        return JIM_ERROR;
    }
    // Create a new callframe
    Jim_CallFrame *callFramePtr = JimCreateCallFrame(interp, interp->framePtr, cmd->u.proc.nsObj);
    callFramePtr->argv = argv;
    callFramePtr->argc = argc;
    callFramePtr->procArgsObjPtr = cmd->u.proc.argListObjPtr;
    callFramePtr->procBodyObjPtr = cmd->u.proc.bodyObjPtr;
    callFramePtr->staticVars = cmd->u.proc.staticVars;
    // Remember where we were called from
    ScriptObj *script = JimGetScript(interp, interp->currentScriptObj);
    callFramePtr->fileNameObj = script->fileNameObj;
    callFramePtr->line = script->linenr;
    Jim_IncrRefCount(cmd->u.proc.argListObjPtr);
    Jim_IncrRefCount(cmd->u.proc.bodyObjPtr);
    interp->framePtr = callFramePtr;
    // How many optional args are available
    int optargs = (argc - 1 - cmd->u.proc.reqArity);
    int retcode;
    // Step 'i' along the actual args, and step 'd' along the formal args
    int i = 1;
    for (int d = 0; d < cmd->u.proc.argListLen; d++) {
        Jim_Obj *nameObjPtr = cmd->u.proc.arglist[d].nameObjPtr;
        if (d == cmd->u.proc.argsPos) {
            // assign $args
            Jim_Obj *listObjPtr;
            int argsLen = 0;
            if (cmd->u.proc.reqArity + cmd->u.proc.optArity < argc - 1)
                argsLen = argc - 1 - (cmd->u.proc.reqArity + cmd->u.proc.optArity);
            listObjPtr = Jim_NewListObj(interp, &argv[i], argsLen);
            // It is possible to rename args
            if (cmd->u.proc.arglist[d].defaultObjPtr) nameObjPtr = cmd->u.proc.arglist[d].defaultObjPtr;
            retcode = Jim_SetVariable(interp, nameObjPtr, listObjPtr);
            if (retcode != JIM_OK) goto badargset;
            i += argsLen;
            continue;
        }
        // Optional or required?
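        // (Illustrative: with "proc p {a {b 2}} ..." called as "p 1", optargs is 0
        // here, so 'a' consumes the actual argument and 'b' falls back to its default.)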
if (cmd->u.proc.arglist[d].defaultObjPtr == NULL || optargs-- > 0) retcode = JimSetProcArg(interp, nameObjPtr, argv[i++]); // Ran out, so use the default else retcode = Jim_SetVariable(interp, nameObjPtr, cmd->u.proc.arglist[d].defaultObjPtr); if (retcode != JIM_OK) goto badargset; } // Eval the body retcode = Jim_EvalObj(interp, cmd->u.proc.bodyObjPtr); badargset: // Free the callframe interp->framePtr = interp->framePtr->parent; JimFreeCallFrame(interp, callFramePtr, JIM_FCF_REUSE); // Now chain any tailcalls in the parent frame if (interp->framePtr->tailcallObj) { do { Jim_Obj *tailcallObj = interp->framePtr->tailcallObj; interp->framePtr->tailcallObj = NULL; if (retcode == JIM_EVAL) { retcode = Jim_EvalObjList(interp, tailcallObj); // If the result of the tailcall is 'return', push it up to the caller if (retcode == JIM_RETURN) interp->returnLevel++; } Jim_DecrRefCount(interp, tailcallObj); } while (interp->framePtr->tailcallObj); // If the tailcall chain finished early, may need to manually discard the command if (interp->framePtr->tailcallCmd) { JimDecrCmdRefCount(interp, interp->framePtr->tailcallCmd); interp->framePtr->tailcallCmd = NULL; } } // Handle the JIM_RETURN return code if (retcode == JIM_RETURN) { if (--interp->returnLevel <= 0) { retcode = interp->returnCode; interp->returnCode = JIM_OK; interp->returnLevel = 0; } } else if (retcode == JIM_ERROR) { interp->addStackTrace++; Jim_DecrRefCount(interp, interp->errorProc); interp->errorProc = argv[0]; Jim_IncrRefCount(interp->errorProc); } return retcode; } __device__ int Jim_EvalSource(Jim_Interp *interp, const char *filename, int lineno, const char *script) { int retval; Jim_Obj *scriptObjPtr = Jim_NewStringObj(interp, script, -1); Jim_IncrRefCount(scriptObjPtr); if (filename) { Jim_Obj *prevScriptObj; JimSetSourceInfo(interp, scriptObjPtr, Jim_NewStringObj(interp, filename, -1), lineno); prevScriptObj = interp->currentScriptObj; interp->currentScriptObj = scriptObjPtr; retval = Jim_EvalObj(interp, scriptObjPtr); interp->currentScriptObj = prevScriptObj; } else retval = Jim_EvalObj(interp, scriptObjPtr); Jim_DecrRefCount(interp, scriptObjPtr); return retval; } __device__ int Jim_Eval(Jim_Interp *interp, const char *script) { return Jim_EvalObj(interp, Jim_NewStringObj(interp, script, -1)); } // Execute script in the scope of the global level __device__ int Jim_EvalGlobal(Jim_Interp *interp, const char *script) { Jim_CallFrame *savedFramePtr = interp->framePtr; interp->framePtr = interp->topFramePtr; int retval = Jim_Eval(interp, script); interp->framePtr = savedFramePtr; return retval; } __device__ int Jim_EvalFileGlobal(Jim_Interp *interp, const char *filename) { Jim_CallFrame *savedFramePtr = interp->framePtr; interp->framePtr = interp->topFramePtr; int retval = Jim_EvalFile(interp, filename); interp->framePtr = savedFramePtr; return retval; } #include <sys/statcu.h> __device__ int Jim_EvalFile(Jim_Interp *interp, const char *filename) { FILE *fp; struct stat sb; if (stat(filename, &sb) != 0 || (fp = fopen(filename, "rt")) == NULL) { Jim_SetResultFormatted(interp, "couldn't read file \"%s\": %s", filename, strerror(errno)); return JIM_ERROR; } if (sb.st_size == 0) { fclose(fp); return JIM_OK; } char *buf = (char *)Jim_Alloc(sb.st_size + 1); int readlen = (int)fread(buf, 1, sb.st_size, fp); if (ferror(fp)) { fclose(fp); Jim_Free(buf); Jim_SetResultFormatted(interp, "failed to load file \"%s\": %s", filename, strerror(errno)); return JIM_ERROR; } fclose(fp); buf[readlen] = 0; Jim_Obj *scriptObjPtr = 
Jim_NewStringObjNoAlloc(interp, buf, readlen); JimSetSourceInfo(interp, scriptObjPtr, Jim_NewStringObj(interp, filename, -1), 1); Jim_IncrRefCount(scriptObjPtr); Jim_Obj *prevScriptObj = interp->currentScriptObj; interp->currentScriptObj = scriptObjPtr; int retcode = Jim_EvalObj(interp, scriptObjPtr); // Handle the JIM_RETURN return code if (retcode == JIM_RETURN) { if (--interp->returnLevel <= 0) { retcode = interp->returnCode; interp->returnCode = JIM_OK; interp->returnLevel = 0; } } // EvalFile changes context, so add a stack frame here if (retcode == JIM_ERROR) interp->addStackTrace++; interp->currentScriptObj = prevScriptObj; Jim_DecrRefCount(interp, scriptObjPtr); return retcode; } #pragma endregion // ----------------------------------------------------------------------------- // Subst // ----------------------------------------------------------------------------- #pragma region Subst static __device__ void JimParseSubst(struct JimParserCtx *pc, int flags) { pc->tstart = pc->p; pc->tline = pc->linenr; if (pc->len == 0) { pc->tend = pc->p; pc->tt = JIM_TT_EOL; pc->eof = 1; return; } if (*pc->p == '[' && !(flags & JIM_SUBST_NOCMD)) { JimParseCmd(pc); return; } if (*pc->p == '$' && !(flags & JIM_SUBST_NOVAR)) { if (JimParseVar(pc) == JIM_OK) return; // Not a var, so treat as a string pc->tstart = pc->p; flags |= JIM_SUBST_NOVAR; } while (pc->len) { if (*pc->p == '$' && !(flags & JIM_SUBST_NOVAR)) break; if (*pc->p == '[' && !(flags & JIM_SUBST_NOCMD)) break; if (*pc->p == '\\' && pc->len > 1) { pc->p++; pc->len--; } pc->p++; pc->len--; } pc->tend = pc->p - 1; pc->tt = (flags & JIM_SUBST_NOESC) ? JIM_TT_STR : JIM_TT_ESC; } // The subst object type reuses most of the data structures and functions of the script object. Script's data structures are a bit more complex // for what is needed for [subst]itution tasks, but the reuse helps to deal with a single data structure at the cost of some more memory usage for substitutions. // This method takes the string representation of an object as a Tcl string where to perform [subst]itution, and generates the pre-parsed internal representation. 
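// Illustrative Tcl-level behaviour of the flags handled here (assumed mapping
// between options and flag bits):
//   subst {a=$a [cmd]}          ;# variable, command and backslash substitution
//   subst -novariables {a=$a}   ;# JIM_SUBST_NOVAR: "$a" is kept verbatim
//   subst -nocommands {x=[cmd]} ;# JIM_SUBST_NOCMD: "[cmd]" is kept verbatim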
static __device__ int SetSubstFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr, int flags) { int scriptTextLen; const char *scriptText = Jim_GetString(objPtr, &scriptTextLen); // Initially parse the subst into tokens (in tokenlist) ParseTokenList tokenlist; ScriptTokenListInit(&tokenlist); struct JimParserCtx parser; JimParserInit(&parser, scriptText, scriptTextLen, 1); while (1) { JimParseSubst(&parser, flags); if (parser.eof) // Note that subst doesn't need the EOL token break; ScriptAddToken(&tokenlist, parser.tstart, (int)(parser.tend - parser.tstart + 1), parser.tt, parser.tline); } // Create the "real" subst/script tokens from the initial token list struct ScriptObj *script = (ScriptObj *)Jim_Alloc(sizeof(*script)); script->inUse = 1; script->substFlags = flags; script->fileNameObj = interp->emptyObj; Jim_IncrRefCount(script->fileNameObj); SubstObjAddTokens(interp, script, &tokenlist); // No longer need the token list ScriptTokenListFree(&tokenlist); #ifdef DEBUG_SHOW_SUBST { printf("==== Subst ====\n"); for (int i = 0; i < script->len; i++) printf("[%2d] %s '%s'\n", i, jim_tt_name(script->token[i].type), Jim_String(script->token[i].objPtr)); } #endif // Free the old internal rep and set the new one Jim_FreeIntRep(interp, objPtr); Jim_SetIntRepPtr(objPtr, script); objPtr->typePtr = &_scriptObjType; return JIM_OK; } static __device__ ScriptObj *Jim_GetSubst(Jim_Interp *interp, Jim_Obj *objPtr, int flags) { if (objPtr->typePtr != &_scriptObjType || ((ScriptObj *)Jim_GetIntRepPtr(objPtr))->substFlags != flags) SetSubstFromAny(interp, objPtr, flags); return (ScriptObj *) Jim_GetIntRepPtr(objPtr); } // Performs commands,variables,blackslashes substitution, storing the result object (with refcount 0) into resObjPtrPtr. __device__ int Jim_SubstObj(Jim_Interp *interp, Jim_Obj *substObjPtr, Jim_Obj **resObjPtrPtr, int flags) { ScriptObj *script = Jim_GetSubst(interp, substObjPtr, flags); Jim_IncrRefCount(substObjPtr); // Make sure it's shared // In order to preserve the internal rep, we increment the inUse field of the script internal rep structure. script->inUse++; *resObjPtrPtr = JimInterpolateTokens(interp, script->token, script->len, flags); script->inUse--; Jim_DecrRefCount(interp, substObjPtr); return (*resObjPtrPtr == NULL ? JIM_ERROR : JIM_OK); } #pragma endregion // ----------------------------------------------------------------------------- // Core commands utility functions // ----------------------------------------------------------------------------- #pragma region Core commands utility functions __device__ void Jim_WrongNumArgs(Jim_Interp *interp, int argc, Jim_Obj *const *argv, const char *msg) { Jim_Obj *listObjPtr = Jim_NewListObj(interp, argv, argc); if (*msg) Jim_ListAppendElement(interp, listObjPtr, Jim_NewStringObj(interp, msg, -1)); Jim_IncrRefCount(listObjPtr); Jim_Obj *objPtr = Jim_ListJoin(interp, listObjPtr, " ", 1); Jim_DecrRefCount(interp, listObjPtr); Jim_IncrRefCount(objPtr); Jim_SetResultFormatted(interp, "wrong # args: should be \"%#s\"", objPtr); Jim_DecrRefCount(interp, objPtr); } // May add the key and/or value to the list. typedef void JimHashtableIteratorCallbackType(Jim_Interp *interp, Jim_Obj *listObjPtr, Jim_HashEntry *he, int type); #define JimTrivialMatch(pattern) (strpbrk((pattern), "*[?\\") == NULL) // For each key of the hash table 'ht' (with string keys) which matches the glob pattern (all if NULL), invoke the callback to add entries to a list. Returns the list. 
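// Typical call shape (illustrative sketch only):
//   Jim_Obj *pat = Jim_NewStringObj(interp, "str*", -1);
//   Jim_Obj *names = JimHashtablePatternMatch(interp, &interp->commands, pat, JimCommandMatch, JIM_CMDLIST_PROCS);
//   // 'names' now lists all procs whose name matches the glob pattern "str*"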
static __device__ Jim_Obj *JimHashtablePatternMatch(Jim_Interp *interp, Jim_HashTable *ht, Jim_Obj *patternObjPtr, JimHashtableIteratorCallbackType *callback, int type)
{
    Jim_HashEntry *he;
    Jim_Obj *listObjPtr = Jim_NewListObj(interp, NULL, 0);
    // Check for the non-pattern case. We can do this much more efficiently.
    if (patternObjPtr && JimTrivialMatch(Jim_String(patternObjPtr))) {
        he = Jim_FindHashEntry(ht, Jim_String(patternObjPtr));
        if (he) callback(interp, listObjPtr, he, type);
    }
    else {
        Jim_HashTableIterator htiter;
        JimInitHashTableIterator(ht, &htiter);
        while ((he = Jim_NextHashEntry(&htiter)) != NULL)
            if (patternObjPtr == NULL || JimGlobMatch(Jim_String(patternObjPtr), (const char *)he->key, 0))
                callback(interp, listObjPtr, he, type);
    }
    return listObjPtr;
}

// Keep these in order
#define JIM_CMDLIST_COMMANDS 0
#define JIM_CMDLIST_PROCS 1
#define JIM_CMDLIST_CHANNELS 2

// Adds matching command names (procs, channels) to the list.
static __device__ void JimCommandMatch(Jim_Interp *interp, Jim_Obj *listObjPtr, Jim_HashEntry *he, int type)
{
    Jim_Cmd *cmdPtr = (Jim_Cmd *)Jim_GetHashEntryVal(he);
    // not a proc
    if (type == JIM_CMDLIST_PROCS && !cmdPtr->isproc) return;
    Jim_Obj *objPtr = Jim_NewStringObj(interp, (const char *)he->key, -1);
    Jim_IncrRefCount(objPtr);
    if (type != JIM_CMDLIST_CHANNELS || Jim_AioFilehandle(interp, objPtr))
        Jim_ListAppendElement(interp, listObjPtr, objPtr);
    Jim_DecrRefCount(interp, objPtr);
}

// type is JIM_CMDLIST_xxx
static __device__ Jim_Obj *JimCommandsList(Jim_Interp *interp, Jim_Obj *patternObjPtr, int type)
{
    return JimHashtablePatternMatch(interp, &interp->commands, patternObjPtr, JimCommandMatch, type);
}

// Keep these in order
#define JIM_VARLIST_GLOBALS 0
#define JIM_VARLIST_LOCALS 1
#define JIM_VARLIST_VARS 2
#define JIM_VARLIST_VALUES 0x1000

// Adds matching variable names to the list
static __device__ void JimVariablesMatch(Jim_Interp *interp, Jim_Obj *listObjPtr, Jim_HashEntry *he, int type)
{
    Jim_Var *varPtr = (Jim_Var *)Jim_GetHashEntryVal(he);
    if (type != JIM_VARLIST_LOCALS || varPtr->linkFramePtr == NULL) {
        Jim_ListAppendElement(interp, listObjPtr, Jim_NewStringObj(interp, (const char *)he->key, -1));
        if (type & JIM_VARLIST_VALUES) Jim_ListAppendElement(interp, listObjPtr, varPtr->objPtr);
    }
}

// mode is JIM_VARLIST_xxx
static __device__ Jim_Obj *JimVariablesList(Jim_Interp *interp, Jim_Obj *patternObjPtr, int mode)
{
    // For [info locals], if we are at top level an empty list is returned. I don't agree, but we aim at compatibility (SS)
    if (mode == JIM_VARLIST_LOCALS && interp->framePtr == interp->topFramePtr) return interp->emptyObj;
    else {
        Jim_CallFrame *framePtr = (mode == JIM_VARLIST_GLOBALS ?
interp->topFramePtr : interp->framePtr); return JimHashtablePatternMatch(interp, &framePtr->vars, patternObjPtr, JimVariablesMatch, mode); } } static __device__ int JimInfoLevel(Jim_Interp *interp, Jim_Obj *levelObjPtr, Jim_Obj **objPtrPtr, int info_level_cmd) { Jim_CallFrame *targetCallFrame = JimGetCallFrameByInteger(interp, levelObjPtr); if (targetCallFrame == NULL) return JIM_ERROR; // No proc call at toplevel callframe if (targetCallFrame == interp->topFramePtr) { Jim_SetResultFormatted(interp, "bad level \"%#s\"", levelObjPtr); return JIM_ERROR; } if (info_level_cmd) *objPtrPtr = Jim_NewListObj(interp, targetCallFrame->argv, targetCallFrame->argc); else { Jim_Obj *listObj = Jim_NewListObj(interp, NULL, 0); Jim_ListAppendElement(interp, listObj, targetCallFrame->argv[0]); Jim_ListAppendElement(interp, listObj, targetCallFrame->fileNameObj); Jim_ListAppendElement(interp, listObj, Jim_NewIntObj(interp, targetCallFrame->line)); *objPtrPtr = listObj; } return JIM_OK; } #pragma endregion // ----------------------------------------------------------------------------- // Core commands // ----------------------------------------------------------------------------- #pragma region Core commands // fake [puts] -- not the real puts, just for debugging. static __device__ int Jim_PutsCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 2 && argc != 3) { Jim_WrongNumArgs(interp, 1, argv, "?-nonewline? string"); return JIM_ERROR; } if (argc == 3) { if (!Jim_CompareStringImmediate(interp, argv[1], "-nonewline")) { Jim_SetResultString(interp, "The second argument must " "be -nonewline", -1); return JIM_ERROR; } else fputs(Jim_String(argv[2]), stdout); } else puts(Jim_String(argv[1])); return JIM_OK; } // Helper for [+] and [*] static __device__ int JimAddMulHelper(Jim_Interp *interp, int argc, Jim_Obj *const *argv, int op) { int i; jim_wide res = (op == JIM_EXPROP_ADD ? 0 : 1); for (i = 1; i < argc; i++) { jim_wide wideValue; if (Jim_GetWide(interp, argv[i], &wideValue) != JIM_OK) goto trydouble; if (op == JIM_EXPROP_ADD) res += wideValue; else res *= wideValue; } Jim_SetResultInt(interp, res); return JIM_OK; trydouble: double doubleRes = (double)res; for (; i < argc; i++) { double doubleValue; if (Jim_GetDouble(interp, argv[i], &doubleValue) != JIM_OK) return JIM_ERROR; if (op == JIM_EXPROP_ADD) doubleRes += doubleValue; else doubleRes *= doubleValue; } Jim_SetResult(interp, Jim_NewDoubleObj(interp, doubleRes)); return JIM_OK; } /* Helper for [-] and [/] */ static __device__ int JimSubDivHelper(Jim_Interp *interp, int argc, Jim_Obj *const *argv, int op) { jim_wide wideValue, res = 0; double doubleValue, doubleRes = 0; int i = 2; if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "number ?number ... number?"); return JIM_ERROR; } else if (argc == 2) { // The arity = 2 case is different. For [- x] returns -x, while [/ x] returns 1/x. 
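// (Illustrative: [- 5] => -5 and [/ 4] => 0.25, whereas [- 10 3 2] => 5 and [/ 12 4] => 3.)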
if (Jim_GetWide(interp, argv[1], &wideValue) != JIM_OK) { if (Jim_GetDouble(interp, argv[1], &doubleValue) != JIM_OK) return JIM_ERROR; else { if (op == JIM_EXPROP_SUB) doubleRes = -doubleValue; else doubleRes = 1.0 / doubleValue; Jim_SetResult(interp, Jim_NewDoubleObj(interp, doubleRes)); return JIM_OK; } } if (op == JIM_EXPROP_SUB) { res = -wideValue; Jim_SetResultInt(interp, res); } else { doubleRes = 1.0 / wideValue; Jim_SetResult(interp, Jim_NewDoubleObj(interp, doubleRes)); } return JIM_OK; } else { if (Jim_GetWide(interp, argv[1], &res) != JIM_OK) { if (Jim_GetDouble(interp, argv[1], &doubleRes) != JIM_OK) { return JIM_ERROR; } else { goto trydouble; } } } for (i = 2; i < argc; i++) { if (Jim_GetWide(interp, argv[i], &wideValue) != JIM_OK) { doubleRes = (double)res; goto trydouble; } if (op == JIM_EXPROP_SUB) res -= wideValue; else res /= wideValue; } Jim_SetResultInt(interp, res); return JIM_OK; trydouble: for (; i < argc; i++) { if (Jim_GetDouble(interp, argv[i], &doubleValue) != JIM_OK) return JIM_ERROR; if (op == JIM_EXPROP_SUB) doubleRes -= doubleValue; else doubleRes /= doubleValue; } Jim_SetResult(interp, Jim_NewDoubleObj(interp, doubleRes)); return JIM_OK; } // [+] static __device__ int Jim_AddCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { return JimAddMulHelper(interp, argc, argv, JIM_EXPROP_ADD); } // [*] static __device__ int Jim_MulCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { return JimAddMulHelper(interp, argc, argv, JIM_EXPROP_MUL); } // [-] static __device__ int Jim_SubCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { return JimSubDivHelper(interp, argc, argv, JIM_EXPROP_SUB); } // [/] static __device__ int Jim_DivCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { return JimSubDivHelper(interp, argc, argv, JIM_EXPROP_DIV); } // [set] static __device__ int Jim_SetCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 2 && argc != 3) { Jim_WrongNumArgs(interp, 1, argv, "varName ?newValue?"); return JIM_ERROR; } if (argc == 2) { Jim_Obj *objPtr = Jim_GetVariable(interp, argv[1], JIM_ERRMSG); if (!objPtr) return JIM_ERROR; Jim_SetResult(interp, objPtr); return JIM_OK; } // argc == 3 case if (Jim_SetVariable(interp, argv[1], argv[2]) != JIM_OK) return JIM_ERROR; Jim_SetResult(interp, argv[2]); return JIM_OK; } // [unset] // unset ?-nocomplain? ?--? ?varName ...? static __device__ int Jim_UnsetCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { int i = 1; int complain = 1; while (i < argc) { if (Jim_CompareStringImmediate(interp, argv[i], "--")) { i++; break; } if (Jim_CompareStringImmediate(interp, argv[i], "-nocomplain")) { complain = 0; i++; continue; } break; } while (i < argc) { if (Jim_UnsetVariable(interp, argv[i], complain ? 
JIM_ERRMSG : JIM_NONE) != JIM_OK && complain) return JIM_ERROR; i++; } return JIM_OK; } // [while] static __device__ int Jim_WhileCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 3) { Jim_WrongNumArgs(interp, 1, argv, "condition body"); return JIM_ERROR; } // The general purpose implementation of while starts here while (1) { int boolean, retval; if ((retval = Jim_GetBoolFromExpr(interp, argv[1], &boolean)) != JIM_OK) return retval; if (!boolean) break; if ((retval = Jim_EvalObj(interp, argv[2])) != JIM_OK) switch (retval) { case JIM_BREAK: goto out; case JIM_CONTINUE: continue; default: return retval; } } out: Jim_ResetResult(interp); return JIM_OK; } // [for] static __device__ int Jim_ForCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 5) { Jim_WrongNumArgs(interp, 1, argv, "start test next body"); return JIM_ERROR; } // Do the initialisation int retval; if ((retval = Jim_EvalObj(interp, argv[1])) != JIM_OK) return retval; // And do the first test now. Better for optimisation if we can do next/test at the bottom of the loop int boolean = 1; retval = Jim_GetBoolFromExpr(interp, argv[2], &boolean); Jim_Obj *varNamePtr = NULL; Jim_Obj *stopVarNamePtr = NULL; // Ready to do the body as follows: // while (1) { // body // check retcode // next // check retcode // test // check retcode/test bool // } #ifdef JIM_OPTIMIZATION // Check if the for is on the form: // for ... {$i < CONST} {incr i} // for ... {$i < $j} {incr i} if (retval == JIM_OK && boolean) { jim_wide stop, currentVal; Jim_Obj *objPtr; // Do it only if there aren't shared arguments ExprByteCode *expr = JimGetExpression(interp, argv[2]); ScriptObj *incrScript = JimGetScript(interp, argv[3]); // Ensure proper lengths to start if (incrScript == NULL || incrScript->len != 3 || !expr || expr->len != 3) goto evalstart; // Ensure proper token types if (incrScript->token[1].type != JIM_TT_ESC || expr->token[0].type != JIM_TT_VAR || (expr->token[1].type != JIM_TT_EXPR_INT && expr->token[1].type != JIM_TT_VAR)) goto evalstart; int cmpOffset; if (expr->token[2].type == JIM_EXPROP_LT) cmpOffset = 0; else if (expr->token[2].type == JIM_EXPROP_LTE) cmpOffset = 1; else goto evalstart; // Update command must be incr if (!Jim_CompareStringImmediate(interp, incrScript->token[1].objPtr, "incr")) goto evalstart; // incr, expression must be about the same variable if (!Jim_StringEqObj(incrScript->token[2].objPtr, expr->token[0].objPtr)) goto evalstart; // Get the stop condition (must be a variable or integer) if (expr->token[1].type == JIM_TT_EXPR_INT) { if (Jim_GetWide(interp, expr->token[1].objPtr, &stop) == JIM_ERROR) goto evalstart; } else { stopVarNamePtr = expr->token[1].objPtr; Jim_IncrRefCount(stopVarNamePtr); // Keep the compiler happy stop = 0; } // Initialization varNamePtr = expr->token[0].objPtr; Jim_IncrRefCount(varNamePtr); objPtr = Jim_GetVariable(interp, varNamePtr, JIM_NONE); if (objPtr == NULL || Jim_GetWide(interp, objPtr, &currentVal) != JIM_OK) goto testcond; // --- OPTIMIZED FOR --- while (retval == JIM_OK) { // === Check condition === // Note that currentVal is already set here // Immediate or Variable? 
            // Get the 'stop' value if the latter
            if (stopVarNamePtr) {
                objPtr = Jim_GetVariable(interp, stopVarNamePtr, JIM_NONE);
                if (objPtr == NULL || Jim_GetWide(interp, objPtr, &stop) != JIM_OK) goto testcond;
            }
            if (currentVal >= stop + cmpOffset) break;
            // Eval body
            retval = Jim_EvalObj(interp, argv[4]);
            if (retval == JIM_OK || retval == JIM_CONTINUE) {
                retval = JIM_OK;
                objPtr = Jim_GetVariable(interp, varNamePtr, JIM_ERRMSG);
                // Increment
                if (objPtr == NULL) {
                    retval = JIM_ERROR;
                    goto out;
                }
                if (!Jim_IsShared(objPtr) && objPtr->typePtr == &_intObjType) {
                    currentVal = ++JimWideValue(objPtr);
                    Jim_InvalidateStringRep(objPtr);
                }
                else if (Jim_GetWide(interp, objPtr, &currentVal) != JIM_OK || Jim_SetVariable(interp, varNamePtr, Jim_NewIntObj(interp, ++currentVal)) != JIM_OK) goto evalnext;
            }
        }
        goto out;
    }
evalstart:
#endif
    while (boolean && (retval == JIM_OK || retval == JIM_CONTINUE)) {
        // Body
        retval = Jim_EvalObj(interp, argv[4]);
        if (retval == JIM_OK || retval == JIM_CONTINUE) {
evalnext:
            // increment
            retval = Jim_EvalObj(interp, argv[3]);
            if (retval == JIM_OK || retval == JIM_CONTINUE) {
testcond:
                // test
                retval = Jim_GetBoolFromExpr(interp, argv[2], &boolean);
            }
        }
    }
out:
    if (stopVarNamePtr) Jim_DecrRefCount(interp, stopVarNamePtr);
    if (varNamePtr) Jim_DecrRefCount(interp, varNamePtr);
    if (retval == JIM_CONTINUE || retval == JIM_BREAK || retval == JIM_OK) {
        Jim_ResetResult(interp);
        return JIM_OK;
    }
    return retval;
}

// [loop]
static __device__ int Jim_LoopCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    if (argc != 5 && argc != 6) {
        Jim_WrongNumArgs(interp, 1, argv, "var first limit ?incr? body");
        return JIM_ERROR;
    }
    jim_wide i;
    jim_wide limit;
    jim_wide incr = 1;
    if (Jim_GetWide(interp, argv[2], &i) != JIM_OK || Jim_GetWide(interp, argv[3], &limit) != JIM_OK || (argc == 6 && Jim_GetWide(interp, argv[4], &incr) != JIM_OK)) return JIM_ERROR;
    Jim_Obj *bodyObjPtr = (argc == 5 ? argv[4] : argv[5]);
    int retval = Jim_SetVariable(interp, argv[1], argv[2]);
    while (((i < limit && incr > 0) || (i > limit && incr < 0)) && retval == JIM_OK) {
        retval = Jim_EvalObj(interp, bodyObjPtr);
        if (retval == JIM_OK || retval == JIM_CONTINUE) {
            Jim_Obj *objPtr = Jim_GetVariable(interp, argv[1], JIM_ERRMSG);
            retval = JIM_OK;
            // Increment
            i += incr;
            if (objPtr && !Jim_IsShared(objPtr) && objPtr->typePtr == &_intObjType) {
                if (argv[1]->typePtr != &_variableObjType)
                    if (Jim_SetVariable(interp, argv[1], objPtr) != JIM_OK) return JIM_ERROR;
                JimWideValue(objPtr) = i;
                Jim_InvalidateStringRep(objPtr);
                // The following step is required in order to invalidate the string repr of "FOO" if the var name is of the form "FOO(IDX)"
                if (argv[1]->typePtr != &_variableObjType)
                    if (Jim_SetVariable(interp, argv[1], objPtr) != JIM_OK) {
                        retval = JIM_ERROR;
                        break;
                    }
            }
            else {
                objPtr = Jim_NewIntObj(interp, i);
                retval = Jim_SetVariable(interp, argv[1], objPtr);
                if (retval != JIM_OK) Jim_FreeNewObj(interp, objPtr);
            }
        }
    }
    if (retval == JIM_OK || retval == JIM_CONTINUE || retval == JIM_BREAK) {
        Jim_ResetResult(interp);
        return JIM_OK;
    }
    return retval;
}

// List iterators make it easy to iterate over a list. At some point iterators will be expanded to support generators.
typedef struct {
    Jim_Obj *objPtr;
    int idx;
} Jim_ListIter;

// Initialise the iterator at the start of the list.
static __device__ void JimListIterInit(Jim_ListIter *iter, Jim_Obj *objPtr)
{
    iter->objPtr = objPtr;
    iter->idx = 0;
}

// Returns the next object from the list, or NULL on end-of-list.
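// Illustrative usage, mirroring how JimForeachMapHelper below drives the
// iterator ('listObj' is a hypothetical list-valued Jim_Obj):
//   Jim_ListIter it;
//   JimListIterInit(&it, listObj);
//   Jim_Obj *e;
//   while ((e = JimListIterNext(interp, &it)) != NULL) { /* visit e */ }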
static __device__ Jim_Obj *JimListIterNext(Jim_Interp *interp, Jim_ListIter *iter) { return (iter->idx >= Jim_ListLength(interp, iter->objPtr) ? NULL : iter->objPtr->internalRep.listValue.ele[iter->idx++]); } // Returns 1 if end-of-list has been reached. static __device__ int JimListIterDone(Jim_Interp *interp, Jim_ListIter *iter) { return iter->idx >= Jim_ListLength(interp, iter->objPtr); } // foreach + lmap implementation static __device__ int JimForeachMapHelper(Jim_Interp *interp, int argc, Jim_Obj *const *argv, int doMap) { int i; if (argc < 4 || argc % 2 != 0) { Jim_WrongNumArgs(interp, 1, argv, "varList list ?varList list ...? script"); return JIM_ERROR; } Jim_Obj *script = argv[argc - 1]; // Last argument is a script int numargs = (argc - 1 - 1); // argc - 'foreach' - script Jim_ListIter twoiters[2]; // Avoid allocation for a single list Jim_ListIter *iters = (numargs == 2 ? twoiters : (Jim_ListIter *)Jim_Alloc(numargs * sizeof(*iters))); int result = JIM_OK; for (i = 0; i < numargs; i++) { JimListIterInit(&iters[i], argv[i + 1]); if (i % 2 == 0 && JimListIterDone(interp, &iters[i])) result = JIM_ERROR; } if (result != JIM_OK) { Jim_SetResultString(interp, "foreach varlist is empty", -1); return result; } Jim_Obj *resultObj = (doMap ? Jim_NewListObj(interp, NULL, 0) : interp->emptyObj); Jim_IncrRefCount(resultObj); while (1) { // Have we expired all lists? for (i = 0; i < numargs; i += 2) if (!JimListIterDone(interp, &iters[i + 1])) break; // All done if (i == numargs) break; // For each list for (i = 0; i < numargs; i += 2) { // foreach var JimListIterInit(&iters[i], argv[i + 1]); Jim_Obj *varName; while ((varName = JimListIterNext(interp, &iters[i])) != NULL) { Jim_Obj *valObj = JimListIterNext(interp, &iters[i + 1]); // Ran out, so store the empty string if (!valObj) valObj = interp->emptyObj; // Avoid shimmering Jim_IncrRefCount(valObj); result = Jim_SetVariable(interp, varName, valObj); Jim_DecrRefCount(interp, valObj); if (result != JIM_OK) goto err; } } switch (result = Jim_EvalObj(interp, script)) { case JIM_OK: if (doMap) Jim_ListAppendElement(interp, resultObj, interp->result); break; case JIM_CONTINUE: break; case JIM_BREAK: goto out; default: goto err; } } out: result = JIM_OK; Jim_SetResult(interp, resultObj); err: Jim_DecrRefCount(interp, resultObj); if (numargs > 2) Jim_Free(iters); return result; } // [foreach] static __device__ int Jim_ForeachCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { return JimForeachMapHelper(interp, argc, argv, 0); } // [lmap] static __device__ int Jim_LmapCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { return JimForeachMapHelper(interp, argc, argv, 1); } // [lassign] static __device__ int Jim_LassignCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "varList list ?varName ...?"); return JIM_ERROR; } Jim_ListIter iter; JimListIterInit(&iter, argv[1]); int result = JIM_ERROR; for (int i = 2; i < argc; i++) { Jim_Obj *valObj = JimListIterNext(interp, &iter); result = Jim_SetVariable(interp, argv[i], valObj ? 
valObj : interp->emptyObj); if (result != JIM_OK) return result; } Jim_Obj *resultObj = Jim_NewListObj(interp, NULL, 0); while (!JimListIterDone(interp, &iter)) Jim_ListAppendElement(interp, resultObj, JimListIterNext(interp, &iter)); Jim_SetResult(interp, resultObj); return JIM_OK; } // [if] static __device__ int Jim_IfCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { int boolean, retval, current = 1, falsebody = 0; if (argc >= 3) { while (1) { // Far not enough arguments given! if (current >= argc) goto err; if ((retval = Jim_GetBoolFromExpr(interp, argv[current++], &boolean)) != JIM_OK) return retval; // There lacks something, isn't it? if (current >= argc) goto err; if (Jim_CompareStringImmediate(interp, argv[current], "then")) current++; // Tsk tsk, no then-clause? if (current >= argc) goto err; if (boolean) return Jim_EvalObj(interp, argv[current]); // Ok: no else-clause follows if (++current >= argc) { Jim_SetResult(interp, Jim_NewEmptyStringObj(interp)); return JIM_OK; } falsebody = current++; if (Jim_CompareStringImmediate(interp, argv[falsebody], "else")) { // IIICKS - else-clause isn't last cmd? if (current != argc - 1) goto err; return Jim_EvalObj(interp, argv[current]); } // Ok: elseif follows meaning all the stuff again (how boring...) else if (Jim_CompareStringImmediate(interp, argv[falsebody], "elseif")) continue; // OOPS - else-clause is not last cmd? else if (falsebody != argc - 1) goto err; return Jim_EvalObj(interp, argv[falsebody]); } //return JIM_OK; // unreached } err: Jim_WrongNumArgs(interp, 1, argv, "condition ?then? trueBody ?elseif ...? ?else? falseBody"); return JIM_ERROR; } // Returns 1 if match, 0 if no match or -<error> on error (e.g. -JIM_ERROR, -JIM_BREAK) __device__ int Jim_CommandMatchObj(Jim_Interp *interp, Jim_Obj *commandObj, Jim_Obj *patternObj, Jim_Obj *stringObj, int nocase) { int argc = 0; Jim_Obj *parms[4]; parms[argc++] = commandObj; if (nocase) parms[argc++] = Jim_NewStringObj(interp, "-nocase", -1); parms[argc++] = patternObj; parms[argc++] = stringObj; int rc = Jim_EvalObjVector(interp, argc, parms); long eq; if (rc != JIM_OK || Jim_GetLong(interp, Jim_GetResult(interp), &eq) != JIM_OK) eq = -rc; return eq; } enum { SWITCH_EXACT, SWITCH_GLOB, SWITCH_RE, SWITCH_CMD }; // [switch] static __device__ int Jim_SwitchCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { int matchOpt = SWITCH_EXACT, opt = 1, patCount, i; Jim_Obj *command = 0, *const *caseList = 0, *strObj; Jim_Obj *script = 0; if (argc < 3) { wrongnumargs: Jim_WrongNumArgs(interp, 1, argv, "?options? string pattern body ... ?default body? 
or {pattern body ?pattern body ...?}"); return JIM_ERROR; } for (opt = 1; opt < argc; ++opt) { const char *option = Jim_String(argv[opt]); if (*option != '-') break; else if (!strncmp(option, "--", 2)) { ++opt; break; } else if (!strncmp(option, "-exact", 2)) matchOpt = SWITCH_EXACT; else if (!strncmp(option, "-glob", 2)) matchOpt = SWITCH_GLOB; else if (!strncmp(option, "-regexp", 2)) matchOpt = SWITCH_RE; else if (!strncmp(option, "-command", 2)) { matchOpt = SWITCH_CMD; if ((argc - opt) < 2) goto wrongnumargs; command = argv[++opt]; } else { Jim_SetResultFormatted(interp, "bad option \"%#s\": must be -exact, -glob, -regexp, -command procname or --", argv[opt]); return JIM_ERROR; } if ((argc - opt) < 2) goto wrongnumargs; } strObj = argv[opt++]; patCount = argc - opt; if (patCount == 1) { Jim_Obj **vector; JimListGetElements(interp, argv[opt], &patCount, &vector); caseList = vector; } else caseList = &argv[opt]; if (patCount == 0 || patCount % 2 != 0) goto wrongnumargs; for (i = 0; script == 0 && i < patCount; i += 2) { Jim_Obj *patObj = caseList[i]; if (!Jim_CompareStringImmediate(interp, patObj, "default") || i < (patCount - 2)) switch (matchOpt) { case SWITCH_EXACT: if (Jim_StringEqObj(strObj, patObj)) script = caseList[i + 1]; break; case SWITCH_GLOB: if (Jim_StringMatchObj(interp, patObj, strObj, 0)) script = caseList[i + 1]; break; case SWITCH_RE: command = Jim_NewStringObj(interp, "regexp", -1); // Fall thru intentionally case SWITCH_CMD:{ int rc = Jim_CommandMatchObj(interp, command, patObj, strObj, 0); // After the execution of a command we need to make sure to reconvert the object into a list again. Only for the single-list style [switch]. if (argc - opt == 1) { Jim_Obj **vector; JimListGetElements(interp, argv[opt], &patCount, &vector); caseList = vector; } // command is here already decref'd if (rc < 0) return -rc; if (rc) script = caseList[i + 1]; break; } } else script = caseList[i + 1]; } for (; i < patCount && Jim_CompareStringImmediate(interp, script, "-"); i += 2) script = caseList[i + 1]; if (script && Jim_CompareStringImmediate(interp, script, "-")) { Jim_SetResultFormatted(interp, "no body specified for pattern \"%#s\"", caseList[i - 2]); return JIM_ERROR; } Jim_ResetResult(interp); return (script ? 
Jim_EvalObj(interp, script) : JIM_OK); } // [list] static __device__ int Jim_ListCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { Jim_Obj *listObjPtr = Jim_NewListObj(interp, argv + 1, argc - 1); Jim_SetResult(interp, listObjPtr); return JIM_OK; } // [lindex] static __device__ int Jim_LindexCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "list ?index ...?"); return JIM_ERROR; } Jim_Obj *objPtr = argv[1]; Jim_IncrRefCount(objPtr); for (int i = 2; i < argc; i++) { Jim_Obj *listObjPtr = objPtr; int idx; if (Jim_GetIndex(interp, argv[i], &idx) != JIM_OK) { Jim_DecrRefCount(interp, listObjPtr); return JIM_ERROR; } if (Jim_ListIndex(interp, listObjPtr, idx, &objPtr, JIM_NONE) != JIM_OK) { // Returns an empty object if the index is out of range Jim_DecrRefCount(interp, listObjPtr); Jim_ResetResult(interp); return JIM_OK; } Jim_IncrRefCount(objPtr); Jim_DecrRefCount(interp, listObjPtr); } Jim_SetResult(interp, objPtr); Jim_DecrRefCount(interp, objPtr); return JIM_OK; } // [llength] static __device__ int Jim_LlengthCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 2) { Jim_WrongNumArgs(interp, 1, argv, "list"); return JIM_ERROR; } Jim_SetResultInt(interp, Jim_ListLength(interp, argv[1])); return JIM_OK; } // [lsearch] __constant__ static const char * const _lsearch_options[] = { "-bool", "-not", "-nocase", "-exact", "-glob", "-regexp", "-all", "-inline", "-command", NULL }; static __device__ int Jim_LsearchCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { enum { OPT_BOOL, OPT_NOT, OPT_NOCASE, OPT_EXACT, OPT_GLOB, OPT_REGEXP, OPT_ALL, OPT_INLINE, OPT_COMMAND }; int i; int opt_bool = 0; int opt_not = 0; int opt_nocase = 0; int opt_all = 0; int opt_inline = 0; int opt_match = OPT_EXACT; int rc = JIM_OK; Jim_Obj *listObjPtr = NULL; Jim_Obj *commandObj = NULL; if (argc < 3) { wrongargs: Jim_WrongNumArgs(interp, 1, argv, "?-exact|-glob|-regexp|-command 'command'? ?-bool|-inline? ?-not? ?-nocase? ?-all? 
list value"); return JIM_ERROR; } for (i = 1; i < argc - 2; i++) { int option; if (Jim_GetEnum(interp, argv[i], _lsearch_options, &option, NULL, JIM_ERRMSG) != JIM_OK) return JIM_ERROR; switch (option) { case OPT_BOOL: opt_bool = 1; opt_inline = 0; break; case OPT_NOT: opt_not = 1; break; case OPT_NOCASE: opt_nocase = 1; break; case OPT_INLINE: opt_inline = 1; opt_bool = 0; break; case OPT_ALL: opt_all = 1; break; case OPT_COMMAND: if (i >= argc - 2) goto wrongargs; commandObj = argv[++i]; // fallthru case OPT_EXACT: case OPT_GLOB: case OPT_REGEXP: opt_match = option; break; } } argv += i; if (opt_all) listObjPtr = Jim_NewListObj(interp, NULL, 0); if (opt_match == OPT_REGEXP) commandObj = Jim_NewStringObj(interp, "regexp", -1); if (commandObj) Jim_IncrRefCount(commandObj); int listlen = Jim_ListLength(interp, argv[0]); for (i = 0; i < listlen; i++) { int eq = 0; Jim_Obj *objPtr = Jim_ListGetIndex(interp, argv[0], i); switch (opt_match) { case OPT_EXACT: eq = Jim_StringCompareObj(interp, argv[1], objPtr, opt_nocase) == 0; break; case OPT_GLOB: eq = Jim_StringMatchObj(interp, argv[1], objPtr, opt_nocase); break; case OPT_REGEXP: case OPT_COMMAND: eq = Jim_CommandMatchObj(interp, commandObj, argv[1], objPtr, opt_nocase); if (eq < 0) { if (listObjPtr) Jim_FreeNewObj(interp, listObjPtr); rc = JIM_ERROR; goto done; } break; } // If we have a non-match with opt_bool, opt_not, !opt_all, can't exit early if (!eq && opt_bool && opt_not && !opt_all) continue; if ((!opt_bool && eq == !opt_not) || (opt_bool && (eq || opt_all))) { // Got a match (or non-match for opt_not), or (opt_bool && opt_all) Jim_Obj *resultObj; if (opt_bool) resultObj = Jim_NewIntObj(interp, eq ^ opt_not); else if (!opt_inline) resultObj = Jim_NewIntObj(interp, i); else resultObj = objPtr; if (opt_all) Jim_ListAppendElement(interp, listObjPtr, resultObj); else { Jim_SetResult(interp, resultObj); goto done; } } } if (opt_all) Jim_SetResult(interp, listObjPtr); else { // No match if (opt_bool) Jim_SetResultBool(interp, opt_not); else if (!opt_inline) Jim_SetResultInt(interp, -1); } done: if (commandObj) Jim_DecrRefCount(interp, commandObj); return rc; } // [lappend] static __device__ int Jim_LappendCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "varName ?value value ...?"); return JIM_ERROR; } Jim_Obj *listObjPtr = Jim_GetVariable(interp, argv[1], JIM_UNSHARED); if (!listObjPtr) { // Create the list if it does not exist listObjPtr = Jim_NewListObj(interp, NULL, 0); if (Jim_SetVariable(interp, argv[1], listObjPtr) != JIM_OK) { Jim_FreeNewObj(interp, listObjPtr); return JIM_ERROR; } } int shared = Jim_IsShared(listObjPtr); if (shared) listObjPtr = Jim_DuplicateObj(interp, listObjPtr); for (int i = 2; i < argc; i++) Jim_ListAppendElement(interp, listObjPtr, argv[i]); if (Jim_SetVariable(interp, argv[1], listObjPtr) != JIM_OK) { if (shared) Jim_FreeNewObj(interp, listObjPtr); return JIM_ERROR; } Jim_SetResult(interp, listObjPtr); return JIM_OK; } // [linsert] static __device__ int Jim_LinsertCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc < 3) { Jim_WrongNumArgs(interp, 1, argv, "list index ?element ...?"); return JIM_ERROR; } Jim_Obj *listPtr = argv[1]; if (Jim_IsShared(listPtr)) listPtr = Jim_DuplicateObj(interp, listPtr); int idx; if (Jim_GetIndex(interp, argv[2], &idx) != JIM_OK) goto err; int len = Jim_ListLength(interp, listPtr); if (idx >= len) idx = len; else if (idx < 0) idx = len + idx + 1; 
Jim_ListInsertElements(interp, listPtr, idx, argc - 3, &argv[3]); Jim_SetResult(interp, listPtr); return JIM_OK; err: if (listPtr != argv[1]) Jim_FreeNewObj(interp, listPtr); return JIM_ERROR; } // [lreplace] static __device__ int Jim_LreplaceCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc < 4) { Jim_WrongNumArgs(interp, 1, argv, "list first last ?element ...?"); return JIM_ERROR; } int first, last; if (Jim_GetIndex(interp, argv[2], &first) != JIM_OK || Jim_GetIndex(interp, argv[3], &last) != JIM_OK) return JIM_ERROR; Jim_Obj *listObj = argv[1]; int len = Jim_ListLength(interp, listObj); first = JimRelToAbsIndex(len, first); last = JimRelToAbsIndex(len, last); int rangeLen; JimRelToAbsRange(len, &first, &last, &rangeLen); // Now construct a new list which consists of: <elements before first> <supplied elements> <elements after last> // Check to see if trying to replace past the end of the list if (first < len) { } // OK. Not past the end else if (len == 0) first = 0; // Special for empty list, adjust first to 0 else { Jim_SetResultString(interp, "list doesn't contain element ", -1); Jim_AppendObj(interp, Jim_GetResult(interp), argv[2]); return JIM_ERROR; } // Add the first set of elements Jim_Obj *newListObj = Jim_NewListObj(interp, listObj->internalRep.listValue.ele, first); // Add supplied elements ListInsertElements(newListObj, -1, argc - 4, argv + 4); // Add the remaining elements ListInsertElements(newListObj, -1, len - first - rangeLen, listObj->internalRep.listValue.ele + first + rangeLen); Jim_SetResult(interp, newListObj); return JIM_OK; } // [lset] static __device__ int Jim_LsetCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc < 3) { Jim_WrongNumArgs(interp, 1, argv, "listVar ?index...? newVal"); return JIM_ERROR; } else if (argc == 3) { // With no indexes, simply implements [set] if (Jim_SetVariable(interp, argv[1], argv[2], 0) != JIM_OK) return JIM_ERROR; Jim_SetResult(interp, argv[2]); return JIM_OK; } return Jim_ListSetIndex(interp, argv[1], argv + 2, argc - 3, argv[argc - 1]); } // [lsort] __constant__ static const char * const _lsort_options[] = { "-ascii", "-nocase", "-increasing", "-decreasing", "-command", "-integer", "-real", "-index", "-unique", NULL }; static __device__ int Jim_LsortCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const argv[]) { enum { OPT_ASCII, OPT_NOCASE, OPT_INCREASING, OPT_DECREASING, OPT_COMMAND, OPT_INTEGER, OPT_REAL, OPT_INDEX, OPT_UNIQUE }; if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "?options? 
list"); return JIM_ERROR; } struct lsort_info info; info.type = lsort_info::JIM_LSORT_ASCII; info.order = 1; info.indexed = 0; info.unique = 0; info.command = NULL; info.interp = interp; for (int i = 1; i < (argc - 1); i++) { int option; if (Jim_GetEnum(interp, argv[i], _lsort_options, &option, NULL, JIM_ENUM_ABBREV | JIM_ERRMSG) != JIM_OK) return JIM_ERROR; switch (option) { case OPT_ASCII: info.type = lsort_info::JIM_LSORT_ASCII; break; case OPT_NOCASE: info.type = lsort_info::JIM_LSORT_NOCASE; break; case OPT_INTEGER: info.type = lsort_info::JIM_LSORT_INTEGER; break; case OPT_REAL: info.type = lsort_info::JIM_LSORT_REAL; break; case OPT_INCREASING: info.order = 1; break; case OPT_DECREASING: info.order = -1; break; case OPT_UNIQUE: info.unique = 1; break; case OPT_COMMAND: if (i >= (argc - 2)) { Jim_SetResultString(interp, "\"-command\" option must be followed by comparison command", -1); return JIM_ERROR; } info.type = lsort_info::JIM_LSORT_COMMAND; info.command = argv[i + 1]; i++; break; case OPT_INDEX: if (i >= (argc - 2)) { Jim_SetResultString(interp, "\"-index\" option must be followed by list index", -1); return JIM_ERROR; } if (Jim_GetIndex(interp, argv[i + 1], &info.index) != JIM_OK) return JIM_ERROR; info.indexed = 1; i++; break; } } Jim_Obj *resObj = Jim_DuplicateObj(interp, argv[argc - 1]); int retCode = ListSortElements(interp, resObj, &info); if (retCode == JIM_OK) Jim_SetResult(interp, resObj); else Jim_FreeNewObj(interp, resObj); return retCode; } // [append] static __device__ int Jim_AppendCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "varName ?value ...?"); return JIM_ERROR; } Jim_Obj *stringObjPtr; if (argc == 2) { stringObjPtr = Jim_GetVariable(interp, argv[1], JIM_ERRMSG); if (!stringObjPtr) return JIM_ERROR; } else { int freeobj = 0; stringObjPtr = Jim_GetVariable(interp, argv[1], JIM_UNSHARED); if (!stringObjPtr) { // Create the string if it doesn't exist stringObjPtr = Jim_NewEmptyStringObj(interp); freeobj = 1; } else if (Jim_IsShared(stringObjPtr)) { freeobj = 1; stringObjPtr = Jim_DuplicateObj(interp, stringObjPtr); } for (int i = 2; i < argc; i++) Jim_AppendObj(interp, stringObjPtr, argv[i]); if (Jim_SetVariable(interp, argv[1], stringObjPtr) != JIM_OK) { if (freeobj) Jim_FreeNewObj(interp, stringObjPtr); return JIM_ERROR; } } Jim_SetResult(interp, stringObjPtr); return JIM_OK; } // [debug] #if defined(JIM_DEBUG_COMMAND) && !defined(JIM_BOOTSTRAP) __constant__ static const char *const _debug_options[] = { "refcount", "objcount", "objects", "invstr", "scriptlen", "exprlen", "exprbc", "show", NULL }; #endif static __device__ int Jim_DebugCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { #if defined(JIM_DEBUG_COMMAND) && !defined(JIM_BOOTSTRAP) enum { OPT_REFCOUNT, OPT_OBJCOUNT, OPT_OBJECTS, OPT_INVSTR, OPT_SCRIPTLEN, OPT_EXPRLEN, OPT_EXPRBC, OPT_SHOW, }; if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "subcommand ?...?"); return JIM_ERROR; } int option; if (Jim_GetEnum(interp, argv[1], _debug_options, &option, "subcommand", JIM_ERRMSG) != JIM_OK) return JIM_ERROR; if (option == OPT_REFCOUNT) { if (argc != 3) { Jim_WrongNumArgs(interp, 2, argv, "object"); return JIM_ERROR; } Jim_SetResultInt(interp, argv[2]->refCount); return JIM_OK; } else if (option == OPT_OBJCOUNT) { int freeobj = 0, liveobj = 0; if (argc != 2) { Jim_WrongNumArgs(interp, 2, argv, ""); return JIM_ERROR; } // Count the number of free objects Jim_Obj *objPtr = 
interp->freeList; while (objPtr) { freeobj++; objPtr = objPtr->nextObjPtr; } // Count the number of live objects objPtr = interp->liveList; while (objPtr) { liveobj++; objPtr = objPtr->nextObjPtr; } // Set the result string and return char buf[256]; sprintf(buf, "free %d used %d", freeobj, liveobj); Jim_SetResultString(interp, buf, -1); return JIM_OK; } else if (option == OPT_OBJECTS) { // Count the number of live objects Jim_Obj *objPtr = interp->liveList; Jim_Obj *listObjPtr = Jim_NewListObj(interp, NULL, 0); while (objPtr) { const char *type = (objPtr->typePtr ? objPtr->typePtr->name : ""); Jim_Obj *subListObjPtr = Jim_NewListObj(interp, NULL, 0); char buf[128]; sprintf(buf, "%p", objPtr); Jim_ListAppendElement(interp, subListObjPtr, Jim_NewStringObj(interp, buf, -1)); Jim_ListAppendElement(interp, subListObjPtr, Jim_NewStringObj(interp, type, -1)); Jim_ListAppendElement(interp, subListObjPtr, Jim_NewIntObj(interp, objPtr->refCount)); Jim_ListAppendElement(interp, subListObjPtr, objPtr); Jim_ListAppendElement(interp, listObjPtr, subListObjPtr); objPtr = objPtr->nextObjPtr; } Jim_SetResult(interp, listObjPtr); return JIM_OK; } else if (option == OPT_INVSTR) { if (argc != 3) { Jim_WrongNumArgs(interp, 2, argv, "object"); return JIM_ERROR; } Jim_Obj *objPtr = argv[2]; if (objPtr->typePtr != NULL) Jim_InvalidateStringRep(objPtr); Jim_ResetResult(interp); return JIM_OK; } else if (option == OPT_SHOW) { if (argc != 3) { Jim_WrongNumArgs(interp, 2, argv, "object"); return JIM_ERROR; } int len, charlen; const char *s = Jim_GetString(argv[2], &len); #ifdef JIM_UTF8 charlen = utf8_strlen(s, len); #else charlen = len; #endif printf("refcount: %d, type: %s\n", argv[2]->refCount, JimObjTypeName(argv[2])); printf("chars (%d): <<%s>>\n", charlen, s); printf("bytes (%d):", len); while (len--) printf(" %02x", (unsigned char)*s++); printf("\n"); return JIM_OK; } else if (option == OPT_SCRIPTLEN) { if (argc != 3) { Jim_WrongNumArgs(interp, 2, argv, "script"); return JIM_ERROR; } ScriptObj *script = JimGetScript(interp, argv[2]); if (script == NULL) return JIM_ERROR; Jim_SetResultInt(interp, script->len); return JIM_OK; } else if (option == OPT_EXPRLEN) { if (argc != 3) { Jim_WrongNumArgs(interp, 2, argv, "expression"); return JIM_ERROR; } ExprByteCode *expr = JimGetExpression(interp, argv[2]); if (expr == NULL) return JIM_ERROR; Jim_SetResultInt(interp, expr->len); return JIM_OK; } else if (option == OPT_EXPRBC) { if (argc != 3) { Jim_WrongNumArgs(interp, 2, argv, "expression"); return JIM_ERROR; } ExprByteCode *expr = JimGetExpression(interp, argv[2]); if (expr == NULL) return JIM_ERROR; Jim_Obj *objPtr = Jim_NewListObj(interp, NULL, 0); for (int i = 0; i < expr->len; i++) { const char *type; const Jim_ExprOperator *op; Jim_Obj *obj = expr->token[i].objPtr; switch (expr->token[i].type) { case JIM_TT_EXPR_INT: type = "int"; break; case JIM_TT_EXPR_DOUBLE: type = "double"; break; case JIM_TT_CMD: type = "command"; break; case JIM_TT_VAR: type = "variable"; break; case JIM_TT_DICTSUGAR: type = "dictsugar"; break; case JIM_TT_EXPRSUGAR: type = "exprsugar"; break; case JIM_TT_ESC: type = "subst"; break; case JIM_TT_STR: type = "string"; break; default: op = JimExprOperatorInfoByOpcode(expr->token[i].type); type = (op == NULL ? "private" : "operator"); obj = Jim_NewStringObj(interp, op ? 
op->name : "", -1); break; } Jim_ListAppendElement(interp, objPtr, Jim_NewStringObj(interp, type, -1)); Jim_ListAppendElement(interp, objPtr, obj); } Jim_SetResult(interp, objPtr); return JIM_OK; } else { Jim_SetResultString(interp, "bad option. Valid options are refcount, " "objcount, objects, invstr", -1); return JIM_ERROR; } // unreached #endif // JIM_BOOTSTRAP #ifndef JIM_DEBUG_COMMAND Jim_SetResultString(interp, "unsupported", -1); return JIM_ERROR; #endif } // [eval] static __device__ int Jim_EvalCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "arg ?arg ...?"); return JIM_ERROR; } int rc = Jim_EvalObj(interp, argc == 2 ? argv[1] : Jim_ConcatObj(interp, argc - 1, argv + 1)); // eval is "interesting", so add a stack frame here if (rc == JIM_ERROR) interp->addStackTrace++; return rc; } // [uplevel] static __device__ int Jim_UplevelCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc >= 2) { // Save the old callframe pointer Jim_CallFrame *savedCallFrame = interp->framePtr; Jim_CallFrame *targetCallFrame; // Lookup the target frame pointer const char *str = Jim_String(argv[1]); if ((str[0] >= '0' && str[0] <= '9') || str[0] == '#') { targetCallFrame = Jim_GetCallFrameByLevel(interp, argv[1]); argc--; argv++; } else targetCallFrame = Jim_GetCallFrameByLevel(interp, NULL); if (targetCallFrame == NULL) return JIM_ERROR; if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv - 1, "?level? command ?arg ...?"); return JIM_ERROR; } // Eval the code in the target callframe interp->framePtr = targetCallFrame; int retcode = Jim_EvalObj(interp, argc == 2 ? argv[1] : Jim_ConcatObj(interp, argc - 1, argv + 1)); interp->framePtr = savedCallFrame; return retcode; } else { Jim_WrongNumArgs(interp, 1, argv, "?level? 
command ?arg ...?"); return JIM_ERROR; } } // [expr] static __device__ int Jim_ExprCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { Jim_Obj *exprResultPtr; int retcode; if (argc == 2) retcode = Jim_EvalExpression(interp, argv[1], &exprResultPtr); else if (argc > 2) { Jim_Obj *objPtr = Jim_ConcatObj(interp, argc - 1, argv + 1); Jim_IncrRefCount(objPtr); retcode = Jim_EvalExpression(interp, objPtr, &exprResultPtr); Jim_DecrRefCount(interp, objPtr); } else { Jim_WrongNumArgs(interp, 1, argv, "expression ?...?"); return JIM_ERROR; } if (retcode != JIM_OK) return retcode; Jim_SetResult(interp, exprResultPtr); Jim_DecrRefCount(interp, exprResultPtr); return JIM_OK; } // [break] static __device__ int Jim_BreakCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 1) { Jim_WrongNumArgs(interp, 1, argv, ""); return JIM_ERROR; } return JIM_BREAK; } // [continue] static __device__ int Jim_ContinueCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 1) { Jim_WrongNumArgs(interp, 1, argv, ""); return JIM_ERROR; } return JIM_CONTINUE; } // [return] static __device__ int Jim_ReturnCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { int i; Jim_Obj *stackTraceObj = NULL; Jim_Obj *errorCodeObj = NULL; int returnCode = JIM_OK; long level = 1; for (i = 1; i < argc - 1; i += 2) { if (Jim_CompareStringImmediate(interp, argv[i], "-code")) { if (Jim_GetReturnCode(interp, argv[i + 1], &returnCode) == JIM_ERROR) return JIM_ERROR; } else if (Jim_CompareStringImmediate(interp, argv[i], "-errorinfo")) stackTraceObj = argv[i + 1]; else if (Jim_CompareStringImmediate(interp, argv[i], "-errorcode")) errorCodeObj = argv[i + 1]; else if (Jim_CompareStringImmediate(interp, argv[i], "-level")) { if (Jim_GetLong(interp, argv[i + 1], &level) != JIM_OK || level < 0) { Jim_SetResultFormatted(interp, "bad level \"%#s\"", argv[i + 1]); return JIM_ERROR; } } else break; } if (i != argc - 1 && i != argc) Jim_WrongNumArgs(interp, 1, argv, "?-code code? ?-errorinfo stacktrace? ?-level level? 
?result?"); // If a stack trace is supplied and code is error, set the stack trace if (stackTraceObj && returnCode == JIM_ERROR) JimSetStackTrace(interp, stackTraceObj); // If an error code list is supplied, set the global $errorCode if (errorCodeObj && returnCode == JIM_ERROR) Jim_SetVariableStr(interp, "errorCode", errorCodeObj, JIMGLOBAL_); interp->returnCode = returnCode; interp->returnLevel = level; if (i == argc - 1) Jim_SetResult(interp, argv[i]); return JIM_RETURN; } // [tailcall] static __device__ int Jim_TailcallCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (interp->framePtr->level == 0) { Jim_SetResultString(interp, "tailcall can only be called from a proc or lambda", -1); return JIM_ERROR; } else if (argc >= 2) { // Need to resolve the tailcall command in the current context Jim_CallFrame *cf = interp->framePtr->parent; Jim_Cmd *cmdPtr = Jim_GetCommand(interp, argv[1], JIM_ERRMSG); if (cmdPtr == NULL) return JIM_ERROR; JimPanic(cf->tailcallCmd != NULL, "Already have a tailcallCmd"); // And stash this pre-resolved command JimIncrCmdRefCount(cmdPtr); cf->tailcallCmd = cmdPtr; // And stash the command list JimPanic(cf->tailcallObj != NULL, "Already have a tailcallobj"); cf->tailcallObj = Jim_NewListObj(interp, argv + 1, argc - 1); Jim_IncrRefCount(cf->tailcallObj); // When the stack unwinds to the previous proc, the stashed command will be evaluated return JIM_EVAL; } return JIM_OK; } static __device__ int JimAliasCmd(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { Jim_Obj *prefixListObj = (Jim_Obj *)Jim_CmdPrivData(interp); // prefixListObj is a list to which the args need to be appended Jim_Obj *cmdList = Jim_DuplicateObj(interp, prefixListObj); Jim_ListInsertElements(interp, cmdList, Jim_ListLength(interp, cmdList), argc - 1, argv + 1); return JimEvalObjList(interp, cmdList); } static __device__ void JimAliasCmdDelete(ClientData privData, Jim_Interp *interp) { Jim_Obj *prefixListObj = (Jim_Obj *)privData; Jim_DecrRefCount(interp, prefixListObj); } static __device__ int Jim_AliasCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc < 3) { Jim_WrongNumArgs(interp, 1, argv, "newname command ?args ...?"); return JIM_ERROR; } Jim_Obj *prefixListObj = Jim_NewListObj(interp, argv + 2, argc - 2); Jim_IncrRefCount(prefixListObj); const char *newname = Jim_String(argv[1]); if (newname[0] == ':' && newname[1] == ':') { while (*++newname == ':') { } } Jim_SetResult(interp, argv[1]); return Jim_CreateCommand(interp, newname, JimAliasCmd, prefixListObj, JimAliasCmdDelete); } // [proc] static __device__ int Jim_ProcCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 4 && argc != 5) { Jim_WrongNumArgs(interp, 1, argv, "name arglist ?statics? body"); return JIM_ERROR; } if (JimValidName(interp, "procedure", argv[1]) != JIM_OK) return JIM_ERROR; Jim_Cmd *cmd = (argc == 4 ? 
JimCreateProcedureCmd(interp, argv[2], NULL, argv[3], NULL) : JimCreateProcedureCmd(interp, argv[2], argv[3], argv[4], NULL)); if (cmd) { // Add the new command Jim_Obj *qualifiedCmdNameObj; const char *cmdname = JimQualifyName(interp, Jim_String(argv[1]), &qualifiedCmdNameObj); JimCreateCommand(interp, cmdname, cmd); // Calculate and set the namespace for this proc JimUpdateProcNamespace(interp, cmd, cmdname); JimFreeQualifiedName(interp, qualifiedCmdNameObj); // Unlike Tcl, set the name of the proc as the result Jim_SetResult(interp, argv[1]); return JIM_OK; } return JIM_ERROR; } // [local] static __device__ int Jim_LocalCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "cmd ?args ...?"); return JIM_ERROR; } // Evaluate the arguments with 'local' in force interp->local++; int retcode = Jim_EvalObjVector(interp, argc - 1, argv + 1); interp->local--; // If OK, and the result is a proc, add it to the list of local procs if (retcode == 0) { Jim_Obj *cmdNameObj = Jim_GetResult(interp); if (Jim_GetCommand(interp, cmdNameObj, JIM_ERRMSG) == NULL) return JIM_ERROR; if (interp->framePtr->localCommands == NULL) { interp->framePtr->localCommands = (Jim_Stack *)Jim_Alloc(sizeof(*interp->framePtr->localCommands)); Jim_InitStack(interp->framePtr->localCommands); } Jim_IncrRefCount(cmdNameObj); Jim_StackPush(interp->framePtr->localCommands, cmdNameObj); } return retcode; } // [upcall] static __device__ int Jim_UpcallCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "cmd ?args ...?"); return JIM_ERROR; } else { Jim_Cmd *cmdPtr = Jim_GetCommand(interp, argv[1], JIM_ERRMSG); if (cmdPtr == NULL || !cmdPtr->isproc || !cmdPtr->prevCmd) { Jim_SetResultFormatted(interp, "no previous command: \"%#s\"", argv[1]); return JIM_ERROR; } // OK. 
// Mark this command as being in an upcall
cmdPtr->u.proc.upcall++; JimIncrCmdRefCount(cmdPtr);
// Invoke the command as normal
int retcode = Jim_EvalObjVector(interp, argc - 1, argv + 1);
// No longer in an upcall
cmdPtr->u.proc.upcall--; JimDecrCmdRefCount(interp, cmdPtr);
return retcode; } }
// [apply]
static __device__ int Jim_ApplyCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) {
    if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "lambdaExpr ?arg ...?"); return JIM_ERROR; }
    else {
        int len = Jim_ListLength(interp, argv[1]);
        if (len != 2 && len != 3) { Jim_SetResultFormatted(interp, "can't interpret \"%#s\" as a lambda expression", argv[1]); return JIM_ERROR; }
        Jim_Obj *nsObj = NULL;
        if (len == 3) {
#ifdef jim_ext_namespace
            // Need to canonicalise the given namespace
            nsObj = JimQualifyNameObj(interp, Jim_ListGetIndex(interp, argv[1], 2));
#else
            Jim_SetResultString(interp, "namespaces not enabled", -1); return JIM_ERROR;
#endif
        }
        Jim_Obj *argListObjPtr = Jim_ListGetIndex(interp, argv[1], 0);
        Jim_Obj *bodyObjPtr = Jim_ListGetIndex(interp, argv[1], 1);
        Jim_Cmd *cmd = JimCreateProcedureCmd(interp, argListObjPtr, NULL, bodyObjPtr, nsObj);
        if (cmd) {
            // Create a new argv array with a dummy argv[0], for error messages
            Jim_Obj **nargv = (Jim_Obj **)Jim_Alloc((argc - 2 + 1) * sizeof(*nargv));
            nargv[0] = Jim_NewStringObj(interp, "apply lambdaExpr", -1);
            Jim_IncrRefCount(nargv[0]);
            memcpy(&nargv[1], argv + 2, (argc - 2) * sizeof(*nargv));
            int ret = JimCallProcedure(interp, cmd, argc - 2 + 1, nargv);
            Jim_DecrRefCount(interp, nargv[0]);
            Jim_Free(nargv);
            JimDecrCmdRefCount(interp, cmd);
            return ret;
        }
        return JIM_ERROR;
    }
}
// [concat]
static __device__ int Jim_ConcatCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) {
    Jim_SetResult(interp, Jim_ConcatObj(interp, argc - 1, argv + 1)); return JIM_OK;
}
// [upvar]
static __device__ int Jim_UpvarCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) {
    // Lookup the target frame pointer
    Jim_CallFrame *targetCallFrame;
    if (argc > 3 && (argc % 2 == 0)) { targetCallFrame = Jim_GetCallFrameByLevel(interp, argv[1]); argc--; argv++; }
    else targetCallFrame = Jim_GetCallFrameByLevel(interp, NULL);
    if (targetCallFrame == NULL) return JIM_ERROR;
    // Check for arity
    if (argc < 3) { Jim_WrongNumArgs(interp, 1, argv, "?level? otherVar localVar ?otherVar localVar ...?"); return JIM_ERROR; }
    // Now... for every other/local couple:
    for (int i = 1; i < argc; i += 2)
        if (Jim_SetVariableLink(interp, argv[i + 1], argv[i], targetCallFrame) != JIM_OK) return JIM_ERROR;
    return JIM_OK;
}
// [global]
static __device__ int JimGLOBAL_CoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) {
    if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "varName ?varName ...?"); return JIM_ERROR; }
    // Link every var to the toplevel having the same name
    if (interp->framePtr->level == 0) return JIM_OK; // global at toplevel...
    for (int i = 1; i < argc; i++) {
        // global ::blah does nothing
        const char *name = Jim_String(argv[i]);
        if (name[0] != ':' || name[1] != ':')
            if (Jim_SetVariableLink(interp, argv[i], argv[i], interp->topFramePtr) != JIM_OK) return JIM_ERROR;
    }
    return JIM_OK;
}
// Does the [string map] operation. On error NULL is returned, otherwise a new
// string object with the result, having refcount = 0, is returned.
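// The scan is greedy and first-match-wins: at each position the map pairs are
// tried in list order, the first key that matches consumes its length and
// emits the replacement, and unmatched runs are batched through noMatchStart
// so they are appended with a single Jim_AppendString call rather than one
// call per character. Hypothetical example:
//   string map {ab X a Y} "aabab"   ;# -> "YXX" ("a"->Y at position 0, then "ab"->X twice)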
static __device__ Jim_Obj *JimStringMap(Jim_Interp *interp, Jim_Obj *mapListObjPtr, Jim_Obj *objPtr, int nocase) { int numMaps = Jim_ListLength(interp, mapListObjPtr); if (numMaps % 2) { Jim_SetResultString(interp, "list must contain an even number of elements", -1); return NULL; } const char *str = Jim_String(objPtr); int strLen = Jim_Utf8Length(interp, objPtr); // Map it const char *noMatchStart = NULL; Jim_Obj *resultObjPtr = Jim_NewStringObj(interp, "", 0); while (strLen) { int i; for (i = 0; i < numMaps; i += 2) { Jim_Obj *objPtr = Jim_ListGetIndex(interp, mapListObjPtr, i); const char *k = Jim_String(objPtr); int kl = Jim_Utf8Length(interp, objPtr); if (strLen >= kl && kl) { int rc = JimStringCompareLen(str, k, kl, nocase); if (rc == 0) { if (noMatchStart) { Jim_AppendString(interp, resultObjPtr, noMatchStart, (int)(str - noMatchStart)); noMatchStart = NULL; } Jim_AppendObj(interp, resultObjPtr, Jim_ListGetIndex(interp, mapListObjPtr, i + 1)); str += utf8_index(str, kl); strLen -= kl; break; } } } // no match if (i == numMaps) { if (noMatchStart == NULL) noMatchStart = str; int c; UNUSED_SYMBOL(c); str += utf8_tounicode(str, &c); strLen--; } } if (noMatchStart) Jim_AppendString(interp, resultObjPtr, noMatchStart, (int)(str - noMatchStart)); return resultObjPtr; } // [string] __constant__ static const char *const _string_options[] = { "bytelength", "length", "compare", "match", "equal", "is", "byterange", "range", "replace", "map", "repeat", "reverse", "index", "first", "last", "cat", "trim", "trimleft", "trimright", "tolower", "toupper", "totitle", NULL }; __constant__ static const char *const _string_nocase_options[] = { "-nocase", NULL }; __constant__ static const char *const _string_nocase_length_options[] = { "-nocase", "-length", NULL }; static __device__ int Jim_StringCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { enum { OPT_BYTELENGTH, OPT_LENGTH, OPT_COMPARE, OPT_MATCH, OPT_EQUAL, OPT_IS, OPT_BYTERANGE, OPT_RANGE, OPT_REPLACE, OPT_MAP, OPT_REPEAT, OPT_REVERSE, OPT_INDEX, OPT_FIRST, OPT_LAST, OPT_CAT, OPT_TRIM, OPT_TRIMLEFT, OPT_TRIMRIGHT, OPT_TOLOWER, OPT_TOUPPER, OPT_TOTITLE }; if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "option ?arguments ...?"); return JIM_ERROR; } int option; if (Jim_GetEnum(interp, argv[1], _string_options, &option, NULL, JIM_ERRMSG | JIM_ENUM_ABBREV) != JIM_OK) return JIM_ERROR; int len; int opt_case = 1; switch (option) { case OPT_LENGTH: case OPT_BYTELENGTH: if (argc != 3) { Jim_WrongNumArgs(interp, 2, argv, "string"); return JIM_ERROR; } len = (option == OPT_LENGTH ? Jim_Utf8Length(interp, argv[2]) : Jim_Length(argv[2])); Jim_SetResultInt(interp, len); return JIM_OK; case OPT_CAT: { Jim_Obj *objPtr; // optimise the one-arg case if (argc == 3) objPtr = argv[2]; else { objPtr = Jim_NewStringObj(interp, "", 0); for (int i = 2; i < argc; i++) Jim_AppendObj(interp, objPtr, argv[i]); } Jim_SetResult(interp, objPtr); return JIM_OK; } case OPT_COMPARE: case OPT_EQUAL: { // n is the number of remaining option args long opt_length = -1; int n = argc - 4; int i = 2; while (n > 0) { int subopt; if (Jim_GetEnum(interp, argv[i++], _string_nocase_length_options, &subopt, NULL, JIM_ENUM_ABBREV) != JIM_OK) { badcompareargs: Jim_WrongNumArgs(interp, 2, argv, "?-nocase? ?-length int? 
string1 string2"); return JIM_ERROR; } if (subopt == 0) { // -nocase opt_case = 0; n--; } else { // -length if (n < 2) goto badcompareargs; if (Jim_GetLong(interp, argv[i++], &opt_length) != JIM_OK) return JIM_ERROR; n -= 2; } } if (n) goto badcompareargs; argv += argc - 2; // Fast version - [string equal], case sensitive, no length if (opt_length < 0 && option != OPT_COMPARE && opt_case) Jim_SetResultBool(interp, Jim_StringEqObj(argv[0], argv[1])); else { if (opt_length >= 0) n = JimStringCompareLen(Jim_String(argv[0]), Jim_String(argv[1]), opt_length, !opt_case); else n = Jim_StringCompareObj(interp, argv[0], argv[1], !opt_case); Jim_SetResultInt(interp, option == OPT_COMPARE ? n : n == 0); } return JIM_OK; } case OPT_MATCH: if (argc != 4 && (argc != 5 || Jim_GetEnum(interp, argv[2], _string_nocase_options, &opt_case, NULL, JIM_ENUM_ABBREV) != JIM_OK)) { Jim_WrongNumArgs(interp, 2, argv, "?-nocase? pattern string"); return JIM_ERROR; } if (opt_case == 0) argv++; Jim_SetResultBool(interp, Jim_StringMatchObj(interp, argv[2], argv[3], !opt_case)); return JIM_OK; case OPT_MAP: { if (argc != 4 && (argc != 5 || Jim_GetEnum(interp, argv[2], _string_nocase_options, &opt_case, NULL, JIM_ENUM_ABBREV) != JIM_OK)) { Jim_WrongNumArgs(interp, 2, argv, "?-nocase? mapList string"); return JIM_ERROR; } if (opt_case == 0) argv++; Jim_Obj *objPtr = JimStringMap(interp, argv[2], argv[3], !opt_case); if (objPtr == NULL) return JIM_ERROR; Jim_SetResult(interp, objPtr); return JIM_OK; } case OPT_RANGE: case OPT_BYTERANGE:{ if (argc != 5) { Jim_WrongNumArgs(interp, 2, argv, "string first last"); return JIM_ERROR; } Jim_Obj *objPtr = (option == OPT_RANGE ? Jim_StringRangeObj(interp, argv[2], argv[3], argv[4]) : Jim_StringByteRangeObj(interp, argv[2], argv[3], argv[4])); if (objPtr == NULL) return JIM_ERROR; Jim_SetResult(interp, objPtr); return JIM_OK; } case OPT_REPLACE:{ if (argc != 5 && argc != 6) { Jim_WrongNumArgs(interp, 2, argv, "string first last ?string?"); return JIM_ERROR; } Jim_Obj *objPtr = JimStringReplaceObj(interp, argv[2], argv[3], argv[4], argc == 6 ? 
argv[5] : NULL); if (objPtr == NULL) return JIM_ERROR; Jim_SetResult(interp, objPtr); return JIM_OK; } case OPT_REPEAT:{ if (argc != 4) { Jim_WrongNumArgs(interp, 2, argv, "string count"); return JIM_ERROR; } jim_wide count; if (Jim_GetWide(interp, argv[3], &count) != JIM_OK) return JIM_ERROR; Jim_Obj *objPtr = Jim_NewStringObj(interp, "", 0); if (count > 0) while (count--) Jim_AppendObj(interp, objPtr, argv[2]); Jim_SetResult(interp, objPtr); return JIM_OK; } case OPT_REVERSE:{ if (argc != 3) { Jim_WrongNumArgs(interp, 2, argv, "string"); return JIM_ERROR; } const char *str = Jim_GetString(argv[2], &len); char *buf = (char *)Jim_Alloc(len + 1); char *p = buf + len; *p = 0; for (int i = 0; i < len; ) { int c; UNUSED_SYMBOL(c); int l = utf8_tounicode(str, &c); memcpy(p - l, str, l); p -= l; i += l; str += l; } Jim_SetResult(interp, Jim_NewStringObjNoAlloc(interp, buf, len)); return JIM_OK; } case OPT_INDEX:{ if (argc != 4) { Jim_WrongNumArgs(interp, 2, argv, "string index"); return JIM_ERROR; } int idx; if (Jim_GetIndex(interp, argv[3], &idx) != JIM_OK) return JIM_ERROR; const char *str = Jim_String(argv[2]); len = Jim_Utf8Length(interp, argv[2]); if (idx != INT_MIN && idx != INT_MAX) idx = JimRelToAbsIndex(len, idx); if (idx < 0 || idx >= len || str == NULL) Jim_SetResultString(interp, "", 0); // ASCII optimisation else if (len == Jim_Length(argv[2])) Jim_SetResultString(interp, str + idx, 1); else { int i = utf8_index(str, idx); int c; UNUSED_SYMBOL(c); Jim_SetResultString(interp, str + i, utf8_tounicode(str + i, &c)); } return JIM_OK; } case OPT_FIRST: case OPT_LAST:{ if (argc != 4 && argc != 5) { Jim_WrongNumArgs(interp, 2, argv, "subString string ?index?"); return JIM_ERROR; } const char *s1 = Jim_String(argv[2]); const char *s2 = Jim_String(argv[3]); int l1 = Jim_Utf8Length(interp, argv[2]); int l2 = Jim_Utf8Length(interp, argv[3]); int idx = 0; if (argc == 5) { if (Jim_GetIndex(interp, argv[4], &idx) != JIM_OK) return JIM_ERROR; idx = JimRelToAbsIndex(l2, idx); } else if (option == OPT_LAST) idx = l2; if (option == OPT_FIRST) Jim_SetResultInt(interp, JimStringFirst(s1, l1, s2, l2, idx)); else #ifdef JIM_UTF8 Jim_SetResultInt(interp, JimStringLastUtf8(s1, l1, s2, idx)); #else Jim_SetResultInt(interp, JimStringLast(s1, l1, s2, idx)); #endif return JIM_OK; } case OPT_TRIM: case OPT_TRIMLEFT: case OPT_TRIMRIGHT:{ if (argc != 3 && argc != 4) { Jim_WrongNumArgs(interp, 2, argv, "string ?trimchars?"); return JIM_ERROR; } Jim_Obj *trimchars = (argc == 4 ? argv[3] : NULL); if (option == OPT_TRIM) Jim_SetResult(interp, JimStringTrim(interp, argv[2], trimchars)); else if (option == OPT_TRIMLEFT) Jim_SetResult(interp, JimStringTrimLeft(interp, argv[2], trimchars)); else if (option == OPT_TRIMRIGHT) Jim_SetResult(interp, JimStringTrimRight(interp, argv[2], trimchars)); return JIM_OK; } case OPT_TOLOWER: case OPT_TOUPPER: case OPT_TOTITLE: if (argc != 3) { Jim_WrongNumArgs(interp, 2, argv, "string"); return JIM_ERROR; } if (option == OPT_TOLOWER) Jim_SetResult(interp, JimStringToLower(interp, argv[2])); else if (option == OPT_TOUPPER) Jim_SetResult(interp, JimStringToUpper(interp, argv[2])); else Jim_SetResult(interp, JimStringToTitle(interp, argv[2])); return JIM_OK; case OPT_IS: if (argc == 4 || (argc == 5 && Jim_CompareStringImmediate(interp, argv[3], "-strict"))) return JimStringIs(interp, argv[argc - 1], argv[2], argc == 5); Jim_WrongNumArgs(interp, 2, argv, "class ?-strict? 
str"); return JIM_ERROR; } return JIM_OK; } // [time] static __device__ int Jim_TimeCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "script ?count?"); return JIM_ERROR; } long count = 1; if (argc == 3 && Jim_GetLong(interp, argv[2], &count) != JIM_OK) return JIM_ERROR; if (count < 0) return JIM_OK; long i = count; jim_wide start = JimClock(); while (i-- > 0) { int retval = Jim_EvalObj(interp, argv[1]); if (retval != JIM_OK) return retval; } jim_wide elapsed = JimClock() - start; char buf[60]; const char *fmt = "%" JIM_WIDE_MODIFIER " microseconds per iteration"; sprintf(buf, fmt, count == 0 ? 0 : elapsed / count); Jim_SetResultString(interp, buf, -1); return JIM_OK; } // [exit] static __device__ int Jim_ExitCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc > 2) { Jim_WrongNumArgs(interp, 1, argv, "?exitCode?"); return JIM_ERROR; } long exitCode = 0; if (argc == 2 && Jim_GetLong(interp, argv[1], &exitCode) != JIM_OK) return JIM_ERROR; interp->exitCode = exitCode; return JIM_EXIT; } // [catch] static __device__ int Jim_CatchCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { // Which return codes are ignored (passed through)? By default, only exit, eval and signal jim_wide ignore_mask = (1 << JIM_EXIT) | (1 << JIM_EVAL) | (1 << JIM_SIGNAL); const int max_ignore_code = sizeof(ignore_mask) * 8; // Reset the error code before catch. Note that this is not strictly correct. Jim_SetVariableStr(interp, "errorCode", Jim_NewStringObj(interp, "NONE", -1), JIMGLOBAL_); int i; for (i = 1; i < argc - 1; i++) { const char *arg = Jim_String(argv[i]); jim_wide option; int ignore; // It's a pity we can't use Jim_GetEnum here :-( if (!strcmp(arg, "--")) { i++; break; } if (*arg != '-') break; if (!strncmp(arg, "-no", 3)) { arg += 3; ignore = 1; } else { arg++; ignore = 0; } if (Jim_StringToWide(arg, &option, 10) != JIM_OK) { option = -1; } if (option < 0) { option = Jim_FindByName(arg, jimReturnCodes, jimReturnCodesSize); } if (option < 0) { goto wrongargs; } if (ignore) ignore_mask |= (1 << option); else ignore_mask &= ~(1 << option); } argc -= i; if (argc < 1 || argc > 3) { wrongargs: Jim_WrongNumArgs(interp, 1, argv, "?-?no?code ... --? script ?resultVarName? ?optionVarName?"); return JIM_ERROR; } argv += i; int sig = 0; if ((ignore_mask & (1 << JIM_SIGNAL)) == 0) sig++; interp->signal_level += sig; // If a signal is set, don't even try to execute the body int exitCode = 0; if (Jim_CheckSignal(interp)) exitCode = JIM_SIGNAL; else { exitCode = Jim_EvalObj(interp, argv[0]); // Don't want any caught error included in a later stack trace interp->errorFlag = 0; } interp->signal_level -= sig; // Catch or pass through? 
Only the first 32/64 codes can be passed through if (exitCode >= 0 && exitCode < max_ignore_code && (((unsigned jim_wide)1 << exitCode) & ignore_mask)) return exitCode; // Not caught, pass it up if (sig && exitCode == JIM_SIGNAL) { // Catch the signal at this level if (interp->signal_set_result) interp->signal_set_result(interp, interp->sigmask); else Jim_SetResultInt(interp, interp->sigmask); interp->sigmask = 0; } if (argc >= 2) { if (Jim_SetVariable(interp, argv[1], Jim_GetResult(interp)) != JIM_OK) return JIM_ERROR; if (argc == 3) { Jim_Obj *optListObj = Jim_NewListObj(interp, NULL, 0); Jim_ListAppendElement(interp, optListObj, Jim_NewStringObj(interp, "-code", -1)); Jim_ListAppendElement(interp, optListObj, Jim_NewIntObj(interp, exitCode == JIM_RETURN ? interp->returnCode : exitCode)); Jim_ListAppendElement(interp, optListObj, Jim_NewStringObj(interp, "-level", -1)); Jim_ListAppendElement(interp, optListObj, Jim_NewIntObj(interp, interp->returnLevel)); if (exitCode == JIM_ERROR) { Jim_ListAppendElement(interp, optListObj, Jim_NewStringObj(interp, "-errorinfo", -1)); Jim_ListAppendElement(interp, optListObj, interp->stackTrace); Jim_Obj *errorCode = Jim_GetVariableStr(interp, "errorCode", JIMGLOBAL_); if (errorCode) { Jim_ListAppendElement(interp, optListObj, Jim_NewStringObj(interp, "-errorcode", -1)); Jim_ListAppendElement(interp, optListObj, errorCode); } } if (Jim_SetVariable(interp, argv[2], optListObj) != JIM_OK) return JIM_ERROR; } } Jim_SetResultInt(interp, exitCode); return JIM_OK; } #ifdef JIM_REFERENCES // [ref] static __device__ int Jim_RefCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 3 && argc != 4) { Jim_WrongNumArgs(interp, 1, argv, "string tag ?finalizer?"); return JIM_ERROR; } if (argc == 3) Jim_SetResult(interp, Jim_NewReference(interp, argv[1], argv[2], NULL)); else Jim_SetResult(interp, Jim_NewReference(interp, argv[1], argv[2], argv[3])); return JIM_OK; } // [getref] static __device__ int Jim_GetrefCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 2) { Jim_WrongNumArgs(interp, 1, argv, "reference"); return JIM_ERROR; } Jim_Reference *refPtr; if ((refPtr = Jim_GetReference(interp, argv[1])) == NULL) return JIM_ERROR; Jim_SetResult(interp, refPtr->objPtr); return JIM_OK; } // [setref] static __device__ int Jim_SetrefCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 3) { Jim_WrongNumArgs(interp, 1, argv, "reference newValue"); return JIM_ERROR; } Jim_Reference *refPtr; if ((refPtr = Jim_GetReference(interp, argv[1])) == NULL) return JIM_ERROR; Jim_IncrRefCount(argv[2]); Jim_DecrRefCount(interp, refPtr->objPtr); refPtr->objPtr = argv[2]; Jim_SetResult(interp, argv[2]); return JIM_OK; } // [collect] static __device__ int Jim_CollectCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 1) { Jim_WrongNumArgs(interp, 1, argv, ""); return JIM_ERROR; } Jim_SetResultInt(interp, Jim_Collect(interp)); // Free all the freed objects while (interp->freeList) { Jim_Obj *nextObjPtr = interp->freeList->nextObjPtr; Jim_Free(interp->freeList); interp->freeList = nextObjPtr; } return JIM_OK; } // [finalize] reference ?newValue? 
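// With a single argument, returns the finalizer command currently attached to
// the reference (the result stays empty if none is set); with two arguments,
// installs argv[2] as the finalizer and returns it. Hypothetical usage sketch,
// assuming myCleanup is a proc defined elsewhere:
//   set r [ref $value tag]
//   finalize $r myCleanup   ;# intended to run when $r is collected (see [collect])
//   finalize $r             ;# -> myCleanup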
static __device__ int Jim_FinalizeCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) {
    if (argc != 2 && argc != 3) { Jim_WrongNumArgs(interp, 1, argv, "reference ?finalizerProc?"); return JIM_ERROR; }
    if (argc == 2) {
        Jim_Obj *cmdNamePtr;
        if (Jim_GetFinalizer(interp, argv[1], &cmdNamePtr) != JIM_OK) return JIM_ERROR;
        if (cmdNamePtr != NULL) // if no finalizer is set, leave the empty (null string) result
            Jim_SetResult(interp, cmdNamePtr);
    }
    else {
        if (Jim_SetFinalizer(interp, argv[1], argv[2]) != JIM_OK) return JIM_ERROR;
        Jim_SetResult(interp, argv[2]);
    }
    return JIM_OK;
}
// [info references]
static __device__ int JimInfoReferences(Jim_Interp *interp, int argc, Jim_Obj *const *argv) {
    Jim_Obj *listObjPtr = Jim_NewListObj(interp, NULL, 0);
    Jim_HashTableIterator htiter;
    Jim_HashEntry *he;
    JimInitHashTableIterator(&interp->references, &htiter);
    while ((he = Jim_NextHashEntry(&htiter)) != NULL) {
        char buf[JIM_REFERENCE_SPACE + 1];
        Jim_Reference *refPtr = (Jim_Reference *)Jim_GetHashEntryVal(he);
        const unsigned long *refId = (const unsigned long *)he->key;
        JimFormatReference(buf, refPtr, *refId);
        Jim_ListAppendElement(interp, listObjPtr, Jim_NewStringObj(interp, buf, -1));
    }
    Jim_SetResult(interp, listObjPtr);
    return JIM_OK;
}
#endif
// [rename]
static __device__ int Jim_RenameCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) {
    if (argc != 3) { Jim_WrongNumArgs(interp, 1, argv, "oldName newName"); return JIM_ERROR; }
    if (JimValidName(interp, "new procedure", argv[2])) return JIM_ERROR;
    return Jim_RenameCommand(interp, Jim_String(argv[1]), Jim_String(argv[2]));
}
#define JIM_DICTMATCH_VALUES 0x0001
typedef void JimDictMatchCallbackType(Jim_Interp *interp, Jim_Obj *listObjPtr, Jim_HashEntry *he, int type);
static __device__ void JimDictMatchKeys(Jim_Interp *interp, Jim_Obj *listObjPtr, Jim_HashEntry *he, int type) {
    Jim_ListAppendElement(interp, listObjPtr, (Jim_Obj *)he->key);
    if (type & JIM_DICTMATCH_VALUES) Jim_ListAppendElement(interp, listObjPtr, (Jim_Obj *)Jim_GetHashEntryVal(he));
}
// Like JimHashtablePatternMatch, but for dictionaries.
static __device__ Jim_Obj *JimDictPatternMatch(Jim_Interp *interp, Jim_HashTable *ht, Jim_Obj *patternObjPtr, JimDictMatchCallbackType *callback, int type) {
    Jim_Obj *listObjPtr = Jim_NewListObj(interp, NULL, 0);
    // A NULL pattern matches every entry; that non-pattern fast path is folded into the loop below.
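// When JIM_DICTMATCH_VALUES is set, the JimDictMatchKeys callback above
// appends the value after each matching key, so the produced list holds
// alternating key/value pairs. Note that Jim_DictValues below passes that
// flag, so as written it returns key/value pairs rather than bare values,
// which may be unintended relative to the usual [dict values] semantics.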
Jim_HashEntry *he; Jim_HashTableIterator htiter; JimInitHashTableIterator(ht, &htiter); while ((he = Jim_NextHashEntry(&htiter)) != NULL) if (patternObjPtr == NULL || JimGlobMatch(Jim_String(patternObjPtr), Jim_String((Jim_Obj *)he->key), 0)) callback(interp, listObjPtr, he, type); return listObjPtr; } __device__ int Jim_DictKeys(Jim_Interp *interp, Jim_Obj *objPtr, Jim_Obj *patternObjPtr) { if (SetDictFromAny(interp, objPtr) != JIM_OK) return JIM_ERROR; Jim_SetResult(interp, JimDictPatternMatch(interp, (Jim_HashTable *)objPtr->internalRep.ptr, patternObjPtr, JimDictMatchKeys, 0)); return JIM_OK; } __device__ int Jim_DictValues(Jim_Interp *interp, Jim_Obj *objPtr, Jim_Obj *patternObjPtr) { if (SetDictFromAny(interp, objPtr) != JIM_OK) return JIM_ERROR; Jim_SetResult(interp, JimDictPatternMatch(interp, (Jim_HashTable *)objPtr->internalRep.ptr, patternObjPtr, JimDictMatchKeys, JIM_DICTMATCH_VALUES)); return JIM_OK; } __device__ int Jim_DictSize(Jim_Interp *interp, Jim_Obj *objPtr) { if (SetDictFromAny(interp, objPtr) != JIM_OK) return -1; return ((Jim_HashTable *)objPtr->internalRep.ptr)->used; } __device__ int Jim_DictInfo(Jim_Interp *interp, Jim_Obj *objPtr) { if (SetDictFromAny(interp, objPtr) != JIM_OK) return JIM_ERROR; Jim_HashTable *ht = (Jim_HashTable *)objPtr->internalRep.ptr; // Note that this uses internal knowledge of the hash table printf("%d entries in table, %d buckets\n", ht->used, ht->size); for (unsigned int i = 0; i < ht->size; i++) { Jim_HashEntry *he = ht->table[i]; if (he) { printf("%d: ", i); while (he) { printf(" %s", Jim_String((Jim_Obj *)he->key)); he = he->next; } printf("\n"); } } return JIM_OK; } static __device__ int Jim_EvalEnsemble(Jim_Interp *interp, const char *basecmd, const char *subcmd, int argc, Jim_Obj *const *argv) { Jim_Obj *prefixObj = Jim_NewStringObj(interp, basecmd, -1); Jim_AppendString(interp, prefixObj, " ", 1); Jim_AppendString(interp, prefixObj, subcmd, -1); return Jim_EvalObjPrefix(interp, prefixObj, argc, argv); } // [dict] __constant__ static const char * const _dict_options[] = { "create", "get", "set", "unset", "exists", "keys", "size", "info", "merge", "with", "append", "lappend", "incr", "remove", "values", "for", "replace", "update", NULL }; static __device__ int Jim_DictCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { enum { OPT_CREATE, OPT_GET, OPT_SET, OPT_UNSET, OPT_EXISTS, OPT_KEYS, OPT_SIZE, OPT_INFO, OPT_MERGE, OPT_WITH, OPT_APPEND, OPT_LAPPEND, OPT_INCR, OPT_REMOVE, OPT_VALUES, OPT_FOR, OPT_REPLACE, OPT_UPDATE, }; if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "subcommand ?arguments ...?"); return JIM_ERROR; } int option; if (Jim_GetEnum(interp, argv[1], _dict_options, &option, "subcommand", JIM_ERRMSG) != JIM_OK) return JIM_ERROR; switch (option) { case OPT_GET: if (argc < 3) { Jim_WrongNumArgs(interp, 2, argv, "dictionary ?key ...?"); return JIM_ERROR; } Jim_Obj *objPtr; if (Jim_DictKeysVector(interp, argv[2], argv + 3, argc - 3, &objPtr, JIM_ERRMSG) != JIM_OK) return JIM_ERROR; Jim_SetResult(interp, objPtr); return JIM_OK; case OPT_SET: if (argc < 5) { Jim_WrongNumArgs(interp, 2, argv, "varName key ?key ...? 
value"); return JIM_ERROR; } return Jim_SetDictKeysVector(interp, argv[2], argv + 3, argc - 4, argv[argc - 1], JIM_ERRMSG); case OPT_EXISTS: if (argc < 4) { Jim_WrongNumArgs(interp, 2, argv, "dictionary key ?key ...?"); return JIM_ERROR; } else { int rc = Jim_DictKeysVector(interp, argv[2], argv + 3, argc - 3, &objPtr, JIM_ERRMSG); if (rc < 0) return JIM_ERROR; Jim_SetResultBool(interp, rc == JIM_OK); return JIM_OK; } case OPT_UNSET: if (argc < 4) { Jim_WrongNumArgs(interp, 2, argv, "varName key ?key ...?"); return JIM_ERROR; } if (Jim_SetDictKeysVector(interp, argv[2], argv + 3, argc - 3, NULL, 0) != JIM_OK) return JIM_ERROR; return JIM_OK; case OPT_KEYS: if (argc != 3 && argc != 4) { Jim_WrongNumArgs(interp, 2, argv, "dictionary ?pattern?"); return JIM_ERROR; } return Jim_DictKeys(interp, argv[2], argc == 4 ? argv[3] : NULL); case OPT_SIZE: if (argc != 3) { Jim_WrongNumArgs(interp, 2, argv, "dictionary"); return JIM_ERROR; } else if (Jim_DictSize(interp, argv[2]) < 0) return JIM_ERROR; Jim_SetResultInt(interp, Jim_DictSize(interp, argv[2])); return JIM_OK; case OPT_MERGE: if (argc == 2) return JIM_OK; if (Jim_DictSize(interp, argv[2]) < 0) return JIM_ERROR; // Handle as ensemble break; case OPT_UPDATE: // Better error message if (argc < 6 || argc % 2) argc = 2; break; case OPT_CREATE: if (argc % 2) { Jim_WrongNumArgs(interp, 2, argv, "?key value ...?"); return JIM_ERROR; } objPtr = Jim_NewDictObj(interp, argv + 2, argc - 2); Jim_SetResult(interp, objPtr); return JIM_OK; case OPT_INFO: if (argc != 3) { Jim_WrongNumArgs(interp, 2, argv, "dictionary"); return JIM_ERROR; } return Jim_DictInfo(interp, argv[2]); } // Handle command as an ensemble return Jim_EvalEnsemble(interp, "dict", _dict_options[option], argc - 2, argv + 2); } // [subst] __constant__ static const char *const _subst_options[] = { "-nobackslashes", "-nocommands", "-novariables", NULL }; static __device__ int Jim_SubstCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { enum { OPT_NOBACKSLASHES, OPT_NOCOMMANDS, OPT_NOVARIABLES }; if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "?options? 
string"); return JIM_ERROR; } int flags = JIM_SUBST_FLAG; for (int i = 1; i < (argc - 1); i++) { int option; if (Jim_GetEnum(interp, argv[i], _subst_options, &option, NULL, JIM_ERRMSG | JIM_ENUM_ABBREV) != JIM_OK) return JIM_ERROR; switch (option) { case OPT_NOBACKSLASHES: flags |= JIM_SUBST_NOESC; break; case OPT_NOCOMMANDS: flags |= JIM_SUBST_NOCMD; break; case OPT_NOVARIABLES: flags |= JIM_SUBST_NOVAR; break; } } Jim_Obj *objPtr; if (Jim_SubstObj(interp, argv[argc - 1], &objPtr, flags) != JIM_OK) return JIM_ERROR; Jim_SetResult(interp, objPtr); return JIM_OK; } // [info] __constant__ static const char *const _info_commands[] = { "body", "statics", "commands", "procs", "channels", "exists", "globals", "level", "frame", "locals", "vars", "version", "patchlevel", "complete", "args", "hostname", "script", "source", "stacktrace", "nameofexecutable", "returncodes", "references", "alias", NULL }; static __device__ int Jim_InfoCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { enum { INFO_BODY, INFO_STATICS, INFO_COMMANDS, INFO_PROCS, INFO_CHANNELS, INFO_EXISTS, INFOGLOBAL_S, INFO_LEVEL, INFO_FRAME, INFO_LOCALS, INFO_VARS, INFO_VERSION, INFO_PATCHLEVEL, INFO_COMPLETE, INFO_ARGS, INFO_HOSTNAME, INFO_SCRIPT, INFO_SOURCE, INFO_STACKTRACE, INFO_NAMEOFEXECUTABLE, INFO_RETURNCODES, INFO_REFERENCES, INFO_ALIAS, }; #ifdef jim_ext_namespace int nons = 0; if (argc > 2 && Jim_CompareStringImmediate(interp, argv[1], "-nons")) { // This is for internal use only argc--; argv++; nons = 1; } #endif if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "subcommand ?args ...?"); return JIM_ERROR; } int cmd; if (Jim_GetEnum(interp, argv[1], _info_commands, &cmd, "subcommand", JIM_ERRMSG | JIM_ENUM_ABBREV) != JIM_OK) return JIM_ERROR; // Test for the the most common commands first, just in case it makes a difference Jim_Obj *objPtr; int mode = 0; switch (cmd) { case INFO_EXISTS: if (argc != 3) { Jim_WrongNumArgs(interp, 2, argv, "varName"); return JIM_ERROR; } Jim_SetResultBool(interp, Jim_GetVariable(interp, argv[2], 0) != NULL); break; case INFO_ALIAS:{ Jim_Cmd *cmdPtr; if (argc != 3) { Jim_WrongNumArgs(interp, 2, argv, "command"); return JIM_ERROR; } if ((cmdPtr = Jim_GetCommand(interp, argv[2], JIM_ERRMSG)) == NULL) return JIM_ERROR; if (cmdPtr->isproc || cmdPtr->u.native.cmdProc != JimAliasCmd) { Jim_SetResultFormatted(interp, "command \"%#s\" is not an alias", argv[2]); return JIM_ERROR; } Jim_SetResult(interp, (Jim_Obj *)cmdPtr->u.native.privData); return JIM_OK; } case INFO_CHANNELS: mode++; // JIM_CMDLIST_CHANNELS #ifndef jim_ext_aio Jim_SetResultString(interp, "aio not enabled", -1); return JIM_ERROR; #endif case INFO_PROCS: mode++; // JIM_CMDLIST_PROCS case INFO_COMMANDS: // mode 0 => JIM_CMDLIST_COMMANDS if (argc != 2 && argc != 3) { Jim_WrongNumArgs(interp, 2, argv, "?pattern?"); return JIM_ERROR; } #ifdef jim_ext_namespace if (!nons) if (Jim_Length(interp->framePtr->nsObj) || (argc == 3 && JimGlobMatch("::*", Jim_String(argv[2]), 0))) return Jim_EvalPrefix(interp, "namespace info", argc - 1, argv + 1); #endif Jim_SetResult(interp, JimCommandsList(interp, argc == 3 ? 
argv[2] : NULL, mode)); break; case INFO_VARS: mode++; // JIM_VARLIST_VARS case INFO_LOCALS: mode++; // JIM_VARLIST_LOCALS case INFOGLOBAL_S: // mode 0 => JIM_VARLISTGLOBAL_S if (argc != 2 && argc != 3) { Jim_WrongNumArgs(interp, 2, argv, "?pattern?"); return JIM_ERROR; } #ifdef jim_ext_namespace if (!nons) if (Jim_Length(interp->framePtr->nsObj) || (argc == 3 && JimGlobMatch("::*", Jim_String(argv[2]), 0))) return Jim_EvalPrefix(interp, "namespace info", argc - 1, argv + 1); #endif Jim_SetResult(interp, JimVariablesList(interp, argc == 3 ? argv[2] : NULL, mode)); break; case INFO_SCRIPT: if (argc != 2) { Jim_WrongNumArgs(interp, 2, argv, ""); return JIM_ERROR; } Jim_SetResult(interp, JimGetScript(interp, interp->currentScriptObj)->fileNameObj); break; case INFO_SOURCE:{ if (argc != 3 && argc != 5) { Jim_WrongNumArgs(interp, 2, argv, "source ?filename line?"); return JIM_ERROR; } jim_wide line; Jim_Obj *resObjPtr; if (argc == 5) { if (Jim_GetWide(interp, argv[4], &line) != JIM_OK) return JIM_ERROR; resObjPtr = Jim_NewStringObj(interp, Jim_String(argv[2]), Jim_Length(argv[2])); JimSetSourceInfo(interp, resObjPtr, argv[3], (int)line); } else { Jim_Obj *fileNameObj; if (argv[2]->typePtr == &_sourceObjType) { fileNameObj = argv[2]->internalRep.sourceValue.fileNameObj; line = argv[2]->internalRep.sourceValue.lineNumber; } else if (argv[2]->typePtr == &_scriptObjType) { ScriptObj *script = JimGetScript(interp, argv[2]); fileNameObj = script->fileNameObj; line = script->firstline; } else { fileNameObj = interp->emptyObj; line = 1; } resObjPtr = Jim_NewListObj(interp, NULL, 0); Jim_ListAppendElement(interp, resObjPtr, fileNameObj); Jim_ListAppendElement(interp, resObjPtr, Jim_NewIntObj(interp, line)); } Jim_SetResult(interp, resObjPtr); break; } case INFO_STACKTRACE: Jim_SetResult(interp, interp->stackTrace); break; case INFO_LEVEL: case INFO_FRAME: switch (argc) { case 2: Jim_SetResultInt(interp, interp->framePtr->level); break; case 3: if (JimInfoLevel(interp, argv[2], &objPtr, cmd == INFO_LEVEL) != JIM_OK) return JIM_ERROR; Jim_SetResult(interp, objPtr); break; default: Jim_WrongNumArgs(interp, 2, argv, "?levelNum?"); return JIM_ERROR; } break; case INFO_BODY: case INFO_STATICS: case INFO_ARGS:{ if (argc != 3) { Jim_WrongNumArgs(interp, 2, argv, "procname"); return JIM_ERROR; } Jim_Cmd *cmdPtr; if ((cmdPtr = Jim_GetCommand(interp, argv[2], JIM_ERRMSG)) == NULL) return JIM_ERROR; if (!cmdPtr->isproc) { Jim_SetResultFormatted(interp, "command \"%#s\" is not a procedure", argv[2]); return JIM_ERROR; } switch (cmd) { case INFO_BODY: Jim_SetResult(interp, cmdPtr->u.proc.bodyObjPtr); break; case INFO_ARGS: Jim_SetResult(interp, cmdPtr->u.proc.argListObjPtr); break; case INFO_STATICS: if (cmdPtr->u.proc.staticVars) { int mode = JIM_VARLIST_LOCALS | JIM_VARLIST_VALUES; Jim_SetResult(interp, JimHashtablePatternMatch(interp, cmdPtr->u.proc.staticVars, NULL, JimVariablesMatch, mode)); } break; } break; } case INFO_VERSION: case INFO_PATCHLEVEL: { char buf[(JIM_INTEGER_SPACE * 2) + 1]; sprintf(buf, "%d.%d", JIM_VERSION / 100, JIM_VERSION % 100); Jim_SetResultString(interp, buf, -1); break; } case INFO_COMPLETE: if (argc != 3 && argc != 4) { Jim_WrongNumArgs(interp, 2, argv, "script ?missing?"); return JIM_ERROR; } else { int len; const char *s = Jim_GetString(argv[2], &len); char missing; Jim_SetResultBool(interp, Jim_ScriptIsComplete(s, len, &missing)); if (missing != ' ' && argc == 4) Jim_SetVariable(interp, argv[3], Jim_NewStringObj(interp, &missing, 1)); } break; case INFO_HOSTNAME: return 
Jim_Eval(interp, "os.gethostname"); // Redirect to os.gethostname if it exists case INFO_NAMEOFEXECUTABLE: return Jim_Eval(interp, "{info nameofexecutable}"); // Redirect to Tcl proc case INFO_RETURNCODES: if (argc == 2) { Jim_Obj *listObjPtr = Jim_NewListObj(interp, NULL, 0); for (int i = 0; jimReturnCodes[i]; i++) { Jim_ListAppendElement(interp, listObjPtr, Jim_NewIntObj(interp, i)); Jim_ListAppendElement(interp, listObjPtr, Jim_NewStringObj(interp, jimReturnCodes[i], -1)); } Jim_SetResult(interp, listObjPtr); } else if (argc == 3) { long code; if (Jim_GetLong(interp, argv[2], &code) != JIM_OK) return JIM_ERROR; const char *name = Jim_ReturnCode(code); if (*name == '?') Jim_SetResultInt(interp, code); else Jim_SetResultString(interp, name, -1); } else { Jim_WrongNumArgs(interp, 2, argv, "?code?"); return JIM_ERROR; } break; case INFO_REFERENCES: #ifdef JIM_REFERENCES return JimInfoReferences(interp, argc, argv); #else Jim_SetResultString(interp, "not supported", -1); return JIM_ERROR; #endif } return JIM_OK; } // [exists] __constant__ static const char *const _exists_options[] = { "-command", "-proc", "-alias", "-var", NULL }; static __device__ int Jim_ExistsCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { enum { OPT_COMMAND, OPT_PROC, OPT_ALIAS, OPT_VAR }; int option; Jim_Obj *objPtr; if (argc == 2) { option = OPT_VAR; objPtr = argv[1]; } else if (argc == 3) { if (Jim_GetEnum(interp, argv[1], _exists_options, &option, NULL, JIM_ERRMSG | JIM_ENUM_ABBREV) != JIM_OK) return JIM_ERROR; objPtr = argv[2]; } else { Jim_WrongNumArgs(interp, 1, argv, "?option? name"); return JIM_ERROR; } int result = 0; if (option == OPT_VAR) result = Jim_GetVariable(interp, objPtr, 0) != NULL; else { // Now different kinds of commands Jim_Cmd *cmd = Jim_GetCommand(interp, objPtr, JIM_NONE); if (cmd) switch (option) { case OPT_COMMAND: result = 1; break; case OPT_ALIAS: result = cmd->isproc == 0 && cmd->u.native.cmdProc == JimAliasCmd; break; case OPT_PROC: result = cmd->isproc; break; } } Jim_SetResultBool(interp, result); return JIM_OK; } // [split] static __device__ int Jim_SplitCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 2 && argc != 3) { Jim_WrongNumArgs(interp, 1, argv, "string ?splitChars?"); return JIM_ERROR; } int len; const char *str = Jim_GetString(argv[1], &len); if (len == 0) return JIM_OK; int strLen = Jim_Utf8Length(interp, argv[1]); // Init const char *splitChars; int splitLen; if (argc == 2) { splitChars = " \n\t\r"; splitLen = 4; } else { splitChars = Jim_String(argv[2]); splitLen = Jim_Utf8Length(interp, argv[2]); } const char *noMatchStart = str; Jim_Obj *resObjPtr = Jim_NewListObj(interp, NULL, 0); // Split int c; if (splitLen) { Jim_Obj *objPtr; while (strLen--) { const char *sc = splitChars; int scLen = splitLen; int sl = utf8_tounicode(str, &c); while (scLen--) { int pc; sc += utf8_tounicode(sc, &pc); if (c == pc) { objPtr = Jim_NewStringObj(interp, noMatchStart, (int)(str - noMatchStart)); Jim_ListAppendElement(interp, resObjPtr, objPtr); noMatchStart = str + sl; break; } } str += sl; } objPtr = Jim_NewStringObj(interp, noMatchStart, (int)(str - noMatchStart)); Jim_ListAppendElement(interp, resObjPtr, objPtr); } else { // This handles the special case of splitchars eq {} Optimise by sharing common (ASCII) characters Jim_Obj **commonObj = NULL; #define NUM_COMMON (128 - 9) while (strLen--) { int n = utf8_tounicode(str, &c); #ifdef JIM_OPTIMIZATION if (c >= 9 && c < 128) { // Common ASCII char. 
                // Common ASCII char. Note that 9 is the tab character
                c -= 9;
                if (!commonObj) {
                    commonObj = (Jim_Obj **)Jim_Alloc(sizeof(*commonObj) * NUM_COMMON);
                    memset(commonObj, 0, sizeof(*commonObj) * NUM_COMMON);
                }
                if (!commonObj[c]) commonObj[c] = Jim_NewStringObj(interp, str, 1);
                Jim_ListAppendElement(interp, resObjPtr, commonObj[c]);
                str++;
                continue;
            }
#endif
            Jim_ListAppendElement(interp, resObjPtr, Jim_NewStringObjUtf8(interp, str, 1));
            str += n;
        }
        Jim_Free(commonObj);
    }
    Jim_SetResult(interp, resObjPtr);
    return JIM_OK;
}

// [join]
static __device__ int Jim_JoinCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    if (argc != 2 && argc != 3) { Jim_WrongNumArgs(interp, 1, argv, "list ?joinString?"); return JIM_ERROR; }
    // Init
    const char *joinStr;
    int joinStrLen;
    if (argc == 2) { joinStr = " "; joinStrLen = 1; }
    else joinStr = Jim_GetString(argv[2], &joinStrLen);
    Jim_SetResult(interp, Jim_ListJoin(interp, argv[1], joinStr, joinStrLen));
    return JIM_OK;
}

// [format]
static __device__ int Jim_FormatCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "formatString ?arg arg ...?"); return JIM_ERROR; }
    Jim_Obj *objPtr = Jim_FormatString(interp, argv[1], argc - 2, argv + 2);
    if (objPtr == NULL) return JIM_ERROR;
    Jim_SetResult(interp, objPtr);
    return JIM_OK;
}

// [scan]
static __device__ int Jim_ScanCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    if (argc < 3) { Jim_WrongNumArgs(interp, 1, argv, "string format ?varName varName ...?"); return JIM_ERROR; }
    if (argv[2]->typePtr != &_scanFmtStringObjType) SetScanFmtFromAny(interp, argv[2]);
    if (FormatGetError(argv[2]) != 0) { Jim_SetResultString(interp, FormatGetError(argv[2]), -1); return JIM_ERROR; }
    if (argc > 3) {
        int maxPos = (int)FormatGetMaxPos(argv[2]);
        int count = (int)FormatGetCnvCount(argv[2]);
        if (maxPos > argc - 3) { Jim_SetResultString(interp, "\"%n$\" argument index out of range", -1); return JIM_ERROR; }
        else if (count > argc - 3) { Jim_SetResultString(interp, "different numbers of variable names and field specifiers", -1); return JIM_ERROR; }
        else if (count < argc - 3) { Jim_SetResultString(interp, "variable is not assigned by any conversion specifiers", -1); return JIM_ERROR; }
    }
    Jim_Obj *listPtr = Jim_ScanString(interp, argv[1], argv[2], JIM_ERRMSG);
    if (listPtr == 0) return JIM_ERROR;
    if (argc > 3) {
        int rc = JIM_OK;
        int count = 0;
        if (listPtr != 0 && listPtr != (Jim_Obj *)EOF) {
            int len = Jim_ListLength(interp, listPtr);
            if (len != 0) {
                Jim_Obj **outVec;
                int outc;
                JimListGetElements(interp, listPtr, &outc, &outVec);
                for (int i = 0; i < outc; ++i)
                    if (Jim_Length(outVec[i]) > 0) {
                        ++count;
                        if (Jim_SetVariable(interp, argv[3 + i], outVec[i]) != JIM_OK) rc = JIM_ERROR;
                    }
            }
            Jim_FreeNewObj(interp, listPtr);
        }
        else count = -1;
        if (rc == JIM_OK) Jim_SetResultInt(interp, count);
        return rc;
    }
    else {
        if (listPtr == (Jim_Obj *)EOF) { Jim_SetResult(interp, Jim_NewListObj(interp, 0, 0)); return JIM_OK; }
        Jim_SetResult(interp, listPtr);
    }
    return JIM_OK;
}

// [error]
static __device__ int Jim_ErrorCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    if (argc != 2 && argc != 3) { Jim_WrongNumArgs(interp, 1, argv, "message ?stacktrace?"); return JIM_ERROR; }
    Jim_SetResult(interp, argv[1]);
    if (argc == 3) { JimSetStackTrace(interp, argv[2]); return JIM_ERROR; }
    interp->addStackTrace++;
    return JIM_ERROR;
}
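// Script-level examples for the commands above (comment only; these implementations
// follow the standard Tcl semantics):
//   split "a,b,c" ,          => {a b c}
//   split "ab" {}            => {a b}   (empty splitChars splits into characters)
//   join {a b c} -           => a-b-c
//   format "%d-%s" 7 x       => 7-x
//   scan "7 x" "%d %s" n s   => 2, with n set to 7 and s set to x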
// [lrange]
static __device__ int Jim_LrangeCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    if (argc != 4) { Jim_WrongNumArgs(interp, 1, argv, "list first last"); return JIM_ERROR; }
    Jim_Obj *objPtr;
    if ((objPtr = Jim_ListRange(interp, argv[1], argv[2], argv[3])) == NULL) return JIM_ERROR;
    Jim_SetResult(interp, objPtr);
    return JIM_OK;
}

// [lrepeat]
static __device__ int Jim_LrepeatCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    long count;
    if (argc < 2 || Jim_GetLong(interp, argv[1], &count) != JIM_OK || count < 0) { Jim_WrongNumArgs(interp, 1, argv, "count ?value ...?"); return JIM_ERROR; }
    if (count == 0 || argc == 2) return JIM_OK;
    argc -= 2;
    argv += 2;
    Jim_Obj *objPtr = Jim_NewListObj(interp, argv, argc);
    while (--count) ListInsertElements(objPtr, -1, argc, argv);
    Jim_SetResult(interp, objPtr);
    return JIM_OK;
}

__device__ char **Jim_GetEnviron()
{
#if __HIPCC__
    return nullptr;
#else
#ifdef HAVE__NSGETENVIRON
    return *_NSGetEnviron();
#else
#ifndef NO_ENVIRON_EXTERN
    extern char **environ;
#endif
    return environ;
#endif
#endif
}

__device__ void Jim_SetEnviron(char **env)
{
#if __HIPCC__
    return;
#else
#ifdef HAVE__NSGETENVIRON
    *_NSGetEnviron() = env;
#else
#ifndef NO_ENVIRON_EXTERN
    extern char **environ;
#endif
    environ = env;
#endif
#endif
}

// [env]
static __device__ int Jim_EnvCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    if (argc == 1) {
        char **e = Jim_GetEnviron();
        Jim_Obj *listObjPtr = Jim_NewListObj(interp, NULL, 0);
        for (int i = 0; e[i]; i++) {
            const char *equals = strchr(e[i], '=');
            if (equals) {
                Jim_ListAppendElement(interp, listObjPtr, Jim_NewStringObj(interp, e[i], (int)(equals - e[i])));
                Jim_ListAppendElement(interp, listObjPtr, Jim_NewStringObj(interp, equals + 1, -1));
            }
        }
        Jim_SetResult(interp, listObjPtr);
        return JIM_OK;
    }
    if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "varName ?default?"); return JIM_ERROR; }
    const char *key = Jim_String(argv[1]);
    const char *val = getenv(key);
    if (val == NULL) {
        if (argc < 3) { Jim_SetResultFormatted(interp, "environment variable \"%#s\" does not exist", argv[1]); return JIM_ERROR; }
        val = Jim_String(argv[2]);
    }
    Jim_SetResult(interp, Jim_NewStringObj(interp, val, -1));
    return JIM_OK;
}

// [source]
static __device__ int Jim_SourceCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    if (argc != 2) { Jim_WrongNumArgs(interp, 1, argv, "fileName"); return JIM_ERROR; }
    int retval = Jim_EvalFile(interp, Jim_String(argv[1]));
    return (retval == JIM_RETURN ? JIM_OK : retval);
}

// [lreverse]
static __device__ int Jim_LreverseCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    if (argc != 2) { Jim_WrongNumArgs(interp, 1, argv, "list"); return JIM_ERROR; }
    int len;
    Jim_Obj **ele;
    JimListGetElements(interp, argv[1], &len, &ele);
    len--;
    Jim_Obj *revObjPtr = Jim_NewListObj(interp, NULL, 0);
    while (len >= 0) ListAppendElement(revObjPtr, ele[len--]);
    Jim_SetResult(interp, revObjPtr);
    return JIM_OK;
}

static __device__ int JimRangeLen(jim_wide start, jim_wide end, jim_wide step)
{
    if (step == 0) return -1;
    if (start == end) return 0;
    else if (step > 0 && start > end) return -1;
    else if (step < 0 && end > start) return -1;
    jim_wide len = end - start;
    if (len < 0) len = -len; // abs(len)
    if (step < 0) step = -step; // abs(step)
    len = 1 + ((len - 1) / step);
    // We can truncate safely to INT_MAX, the range command will always return an error for such a long range because Tcl lists can't be so long.
    if (len > INT_MAX) len = INT_MAX;
    return (int)(len < 0 ? -1 : len);
}
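// Worked examples for JimRangeLen (comment only): the length is 1 + (|end - start| - 1) / |step|,
// so JimRangeLen(0, 5, 1) == 5 (0 1 2 3 4) and JimRangeLen(10, 0, -2) == 5 (10 8 6 4 2);
// a zero step, or a step that moves away from 'end', yields -1 (an "infinite" range).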
// [range]
static __device__ int Jim_RangeCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    if (argc < 2 || argc > 4) { Jim_WrongNumArgs(interp, 1, argv, "?start? end ?step?"); return JIM_ERROR; }
    jim_wide start = 0, end, step = 1;
    if (argc == 2) {
        if (Jim_GetWide(interp, argv[1], &end) != JIM_OK) return JIM_ERROR;
    }
    else {
        if (Jim_GetWide(interp, argv[1], &start) != JIM_OK || Jim_GetWide(interp, argv[2], &end) != JIM_OK) return JIM_ERROR;
        if (argc == 4 && Jim_GetWide(interp, argv[3], &step) != JIM_OK) return JIM_ERROR;
    }
    int len;
    if ((len = JimRangeLen(start, end, step)) == -1) { Jim_SetResultString(interp, "Invalid (infinite?) range specified", -1); return JIM_ERROR; }
    Jim_Obj *objPtr = Jim_NewListObj(interp, NULL, 0);
    for (int i = 0; i < len; i++) ListAppendElement(objPtr, Jim_NewIntObj(interp, start + i * step));
    Jim_SetResult(interp, objPtr);
    return JIM_OK;
}

// [rand]
static __device__ int Jim_RandCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    if (argc < 1 || argc > 3) { Jim_WrongNumArgs(interp, 1, argv, "?min? max"); return JIM_ERROR; }
    jim_wide min = 0, max = 0, len, maxMul;
    if (argc == 1) { max = JIM_WIDE_MAX; }
    else if (argc == 2) {
        if (Jim_GetWide(interp, argv[1], &max) != JIM_OK) return JIM_ERROR;
    }
    else if (argc == 3) {
        if (Jim_GetWide(interp, argv[1], &min) != JIM_OK || Jim_GetWide(interp, argv[2], &max) != JIM_OK) return JIM_ERROR;
    }
    len = max - min;
    if (len < 0) { Jim_SetResultString(interp, "Invalid arguments (max < min)", -1); return JIM_ERROR; }
    maxMul = JIM_WIDE_MAX - (len ? (JIM_WIDE_MAX % len) : 0);
    while (1) {
        jim_wide r;
        JimRandomBytes(interp, &r, sizeof(jim_wide));
        if (r < 0 || r >= maxMul) continue;
        r = (len == 0 ? 0 : r % len);
        Jim_SetResultInt(interp, min + r);
        return JIM_OK;
    }
}

static __device__ int InterpObjCmd(ClientData clientData, Jim_Interp *interp, int argc, Jim_Obj *const args[])
{
    if (argc < 2) { Jim_WrongNumArgs(interp, 1, args, "SUBCOMMAND ..."); return JIM_ERROR; }
    return JIM_OK;
}

static __device__ void InterpDeleteCmd(ClientData data, Jim_Interp *interp)
{
    Jim_Interp *p = (Jim_Interp *)data;
    Jim_FreeInterp(p);
}

// [interp] *added*
__constant__ static const char *const _interp_commands[] = { "create", "alias", "eval", "delete", NULL };
static __device__ int Jim_InterpCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    enum { INTERP_CREATE, INTERP_ALIAS, INTERP_EVAL, INTERP_DELETE };
    if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "subcommand ?args ...?"); return JIM_ERROR; }
    int cmd;
    if (Jim_GetEnum(interp, argv[1], _interp_commands, &cmd, "subcommand", JIM_ERRMSG | JIM_ENUM_ABBREV) != JIM_OK) return JIM_ERROR;
    switch (cmd) {
    case INTERP_CREATE: {
        const char *arg;
        //bool safe = false;
        if (argc == 4) {
            arg = Jim_String(argv[1]);
            if (!strcmp(arg, "-safe")) {
                argc--;
                //safe = true;
            }
        }
        if (argc < 3) {
            Jim_WrongNumArgs(interp, 2, argv, "CREATE ?-safe? path");
path"); return JIM_ERROR; } Jim_Interp *p = Jim_CreateInterp(); if (!p) { Jim_SetResultString(interp, "malloc failed", -1); return JIM_ERROR; } Jim_RegisterCoreCommands(p); arg = Jim_String(argv[2]); Jim_CreateCommand(interp, arg, (Jim_CmdProc *)InterpObjCmd, (ClientData)p, InterpDeleteCmd); return JIM_OK; } case INTERP_ALIAS: { return JIM_OK; } case INTERP_EVAL: { if (argc < 3) { Jim_WrongNumArgs(interp, 2, argv, "name arg ?arg ...?"); return JIM_ERROR; } Jim_Cmd *cmdPtr; if ((cmdPtr = Jim_GetCommand(interp, argv[2], JIM_ERRMSG)) == NULL) { Jim_SetResultFormatted(interp, "Unable to find interp \"%#s\"", Jim_String(argv[2])); return JIM_ERROR; } if (cmdPtr->isproc) { Jim_SetResultFormatted(interp, "Variable \"%#s\" is a procedure", Jim_String(argv[2])); return JIM_ERROR; } Jim_Interp *p = (Jim_Interp *)cmdPtr->u.native.privData; int rc = Jim_EvalObj(p, argc == 4 ? argv[3] : Jim_ConcatObj(p, argc - 3, argv + 3)); // eval is "interesting", so add a stack frame here if (rc == JIM_ERROR) { Jim_Obj *scriptObjPtr = p->currentScriptObj; ScriptObj *script = JimGetScript(p, scriptObjPtr); if (!JimScriptValid(p, script)) { Jim_DecrRefCount(p, scriptObjPtr); return JIM_ERROR; } //interp->errorFileNameObj = p->errorFileNameObj; //interp->errorFlag = p->errorFlag; //interp->errorLine = p->errorLine; //interp->errorProc = p->errorProc; JimAddErrorToStack(interp, script); } return rc; } case INTERP_DELETE: { Jim_DeleteCommand(interp, Jim_String(argv[2])); return JIM_OK; } } return JIM_OK; } __constant__ static const struct { const char *name; Jim_CmdProc *cmdProc; } Jim_CoreCommandsTable[] = { {"alias", Jim_AliasCoreCommand}, {"set", Jim_SetCoreCommand}, {"unset", Jim_UnsetCoreCommand}, {"puts", Jim_PutsCoreCommand}, {"+", Jim_AddCoreCommand}, {"*", Jim_MulCoreCommand}, {"-", Jim_SubCoreCommand}, {"/", Jim_DivCoreCommand}, {"incr", Jim_IncrCoreCommand}, {"while", Jim_WhileCoreCommand}, {"loop", Jim_LoopCoreCommand}, {"for", Jim_ForCoreCommand}, {"foreach", Jim_ForeachCoreCommand}, {"lmap", Jim_LmapCoreCommand}, {"lassign", Jim_LassignCoreCommand}, {"if", Jim_IfCoreCommand}, {"switch", Jim_SwitchCoreCommand}, {"list", Jim_ListCoreCommand}, {"lindex", Jim_LindexCoreCommand}, {"lset", Jim_LsetCoreCommand}, {"lsearch", Jim_LsearchCoreCommand}, {"llength", Jim_LlengthCoreCommand}, {"lappend", Jim_LappendCoreCommand}, {"linsert", Jim_LinsertCoreCommand}, {"lreplace", Jim_LreplaceCoreCommand}, {"lsort", Jim_LsortCoreCommand}, {"append", Jim_AppendCoreCommand}, {"debug", Jim_DebugCoreCommand}, {"eval", Jim_EvalCoreCommand}, {"uplevel", Jim_UplevelCoreCommand}, {"expr", Jim_ExprCoreCommand}, {"break", Jim_BreakCoreCommand}, {"continue", Jim_ContinueCoreCommand}, {"proc", Jim_ProcCoreCommand}, {"concat", Jim_ConcatCoreCommand}, {"return", Jim_ReturnCoreCommand}, {"upvar", Jim_UpvarCoreCommand}, {"global", JimGLOBAL_CoreCommand}, {"string", Jim_StringCoreCommand}, {"time", Jim_TimeCoreCommand}, {"exit", Jim_ExitCoreCommand}, {"catch", Jim_CatchCoreCommand}, #ifdef JIM_REFERENCES {"ref", Jim_RefCoreCommand}, {"getref", Jim_GetrefCoreCommand}, {"setref", Jim_SetrefCoreCommand}, {"finalize", Jim_FinalizeCoreCommand}, {"collect", Jim_CollectCoreCommand}, #endif {"rename", Jim_RenameCoreCommand}, {"dict", Jim_DictCoreCommand}, {"subst", Jim_SubstCoreCommand}, {"info", Jim_InfoCoreCommand}, {"exists", Jim_ExistsCoreCommand}, {"split", Jim_SplitCoreCommand}, {"join", Jim_JoinCoreCommand}, {"format", Jim_FormatCoreCommand}, {"scan", Jim_ScanCoreCommand}, {"error", Jim_ErrorCoreCommand}, {"lrange", Jim_LrangeCoreCommand}, 
{"lrepeat", Jim_LrepeatCoreCommand}, {"env", Jim_EnvCoreCommand}, {"source", Jim_SourceCoreCommand}, {"lreverse", Jim_LreverseCoreCommand}, {"range", Jim_RangeCoreCommand}, {"rand", Jim_RandCoreCommand}, {"tailcall", Jim_TailcallCoreCommand}, {"local", Jim_LocalCoreCommand}, {"upcall", Jim_UpcallCoreCommand}, {"apply", Jim_ApplyCoreCommand}, {"interp", Jim_InterpCoreCommand}, {NULL, NULL}, }; __device__ void Jim_RegisterCoreCommands(Jim_Interp *interp) { int i = 0; while (Jim_CoreCommandsTable[i].name != NULL) { Jim_CreateCommand(interp, Jim_CoreCommandsTable[i].name, Jim_CoreCommandsTable[i].cmdProc, NULL, NULL); i++; } } #pragma endregion // ----------------------------------------------------------------------------- // Interactive prompt // ----------------------------------------------------------------------------- #pragma region Interactive prompt __device__ void Jim_MakeErrorMessage(Jim_Interp *interp) { Jim_Obj *argv[2]; argv[0] = Jim_NewStringObj(interp, "errorInfo", -1); argv[1] = interp->result; Jim_EvalObjVector(interp, 2, argv); } static __device__ void JimSetFailedEnumResult(Jim_Interp *interp, const char *arg, const char *badtype, const char *prefix, const char *const *tablePtr, const char *name) { int count; for (count = 0; tablePtr[count]; count++) { } if (name == NULL) name = "option"; Jim_SetResultFormatted(interp, "%s%s \"%s\": must be ", badtype, name, arg); char **tablePtrSorted = (char **)Jim_Alloc(sizeof(char *) * count); memcpy(tablePtrSorted, tablePtr, sizeof(char *) * count); qsort(tablePtrSorted, count, sizeof(char *), qsortCompareStringPointers); for (int i = 0; i < count; i++) { if (i + 1 == count && count > 1) Jim_AppendString(interp, Jim_GetResult(interp), "or ", -1); Jim_AppendStrings(interp, Jim_GetResult(interp), prefix, tablePtrSorted[i], NULL); if (i + 1 != count) Jim_AppendString(interp, Jim_GetResult(interp), ", ", -1); } Jim_Free(tablePtrSorted); } __device__ int Jim_GetEnum(Jim_Interp *interp, Jim_Obj *objPtr, const char *const *tablePtr, int *indexPtr, const char *name, int flags) { const char *bad = "bad "; const char *const *entryPtr = NULL; int i; int arglen; const char *arg = Jim_GetString(objPtr, &arglen); int match = -1; *indexPtr = -1; for (entryPtr = tablePtr, i = 0; *entryPtr != NULL; entryPtr++, i++) { if (Jim_CompareStringImmediate(interp, objPtr, *entryPtr)) { // Found an exact match *indexPtr = i; return JIM_OK; } // Accept an unambiguous abbreviation. 
        // Accept an unambiguous abbreviation. Note that '-' doesn't constitute a valid abbreviation
        if (flags & JIM_ENUM_ABBREV && !strncmp(arg, *entryPtr, arglen)) {
            if (*arg == '-' && arglen == 1) break;
            if (match >= 0) { bad = "ambiguous "; goto ambiguous; }
            match = i;
        }
    }
    // If we had an unambiguous partial match
    if (match >= 0) { *indexPtr = match; return JIM_OK; }
  ambiguous:
    if (flags & JIM_ERRMSG) JimSetFailedEnumResult(interp, arg, bad, "", tablePtr, name);
    return JIM_ERROR;
}

__device__ int Jim_GetEnumFromStruct(Jim_Interp *interp, Jim_Obj *objPtr, const void **tablePtr, int elementSize, int *indexPtr, const char *name, int flags)
{
    const char *bad = "bad ";
    const void **entryPtrStruct = NULL;
    const char *const *entryPtr = NULL;
    int i;
    int arglen;
    const char *arg = Jim_GetString(objPtr, &arglen);
    int match = -1;
    *indexPtr = -1;
    for (entryPtrStruct = tablePtr, i = 0; *entryPtrStruct != NULL; entryPtrStruct += elementSize, i++) {
        entryPtr = (const char *const *)entryPtrStruct;
        if (Jim_CompareStringImmediate(interp, objPtr, *entryPtr)) {
            // Found an exact match
            *indexPtr = i;
            return JIM_OK;
        }
        // Accept an unambiguous abbreviation. Note that '-' doesn't constitute a valid abbreviation
        if (flags & JIM_ENUM_ABBREV && !strncmp(arg, *entryPtr, arglen)) {
            if (*arg == '-' && arglen == 1) break;
            if (match >= 0) { bad = "ambiguous "; goto ambiguous; }
            match = i;
        }
    }
    // If we had an unambiguous partial match
    if (match >= 0) { *indexPtr = match; return JIM_OK; }
  ambiguous:
    if (flags & JIM_ERRMSG) JimSetFailedEnumResult(interp, arg, bad, "", (const char *const *)tablePtr, name);
    return JIM_ERROR;
}

__device__ int Jim_FindByName(const char *name, const char * const array[], size_t len)
{
    for (int i = 0; i < (int)len; i++)
        if (array[i] && !strcmp(array[i], name)) return i;
    return -1;
}

__device__ int Jim_IsDict(Jim_Obj *objPtr)
{
    return objPtr->typePtr == &_dictObjType;
}

__device__ int Jim_IsList(Jim_Obj *objPtr)
{
    return objPtr->typePtr == &_listObjType;
}
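// Example (illustrative sketch, excluded from the build): a typical Jim_GetEnum call
// site modelled on the command implementations in this file; the option table and
// function name here are hypothetical.
#if 0
__constant__ static const char *const example_options[] = { "-all", "-none", NULL };

static __device__ int ExampleParseOption(Jim_Interp *interp, Jim_Obj *optObj)
{
    enum { OPT_ALL, OPT_NONE };
    int option;
    // JIM_ENUM_ABBREV also accepts unambiguous prefixes such as "-a" for "-all";
    // on failure, JimSetFailedEnumResult builds the "bad option ... must be" message
    if (Jim_GetEnum(interp, optObj, example_options, &option, "option", JIM_ERRMSG | JIM_ENUM_ABBREV) != JIM_OK)
        return JIM_ERROR;
    Jim_SetResultInt(interp, option == OPT_ALL);
    return JIM_OK;
}
#endif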
// Very simple printf-like formatting, designed for error messages.
//
// The format may contain up to 5 '%s' or '%#s', corresponding to variable arguments.
// The resulting string is created and set as the result.
//
// Each '%s' should correspond to a regular string parameter.
// Each '%#s' should correspond to a (Jim_Obj *) parameter.
// Any other printf specifier is not allowed (but %% is allowed for the % character).
//
// e.g. Jim_SetResultFormatted(interp, "Bad option \"%#s\" in proc \"%#s\"", optionObjPtr, procNamePtr);
//
// Note: We take advantage of the fact that printf has the same behaviour for both %s and %#s
__device__ void Jim_SetResultFormatted(Jim_Interp *interp, const char *format, ...)
{
    va_list va;
    va_start(va, format);
    // Initial space needed
    int len = strlen(format);
    int extra = 0;
    int n = 0;
    const char *params[5];
    for (int i = 0; i < len && n < 5; i++) {
        int l;
        if (!strncmp(format + i, "%s", 2)) {
            params[n] = va_arg(va, char *);
            l = strlen(params[n]);
        }
        else if (!strncmp(format + i, "%#s", 3)) {
            Jim_Obj *objPtr = va_arg(va, Jim_Obj *);
            params[n] = Jim_GetString(objPtr, &l);
        }
        else {
            if (format[i] == '%') i++;
            continue;
        }
        n++;
        extra += l;
    }
    len += extra;
    char *buf = (char *)Jim_Alloc(len + 1);
    len = snprintf(buf, len + 1, format, params[0], params[1], params[2], params[3], params[4]);
    Jim_SetResult(interp, Jim_NewStringObjNoAlloc(interp, buf, len));
    va_end(va);
}

#pragma endregion

// -----------------------------------------------------------------------------
// CommandInfo Command *Added*
// -----------------------------------------------------------------------------
#pragma region CommandInfo Command *Added*

__device__ int Jim_GetCommandInfoStr(Jim_Interp *interp, const char *name, Jim_CmdInfo *cmdInfo)
{
    Jim_Obj *nameObjPtr = Jim_NewStringObj(interp, name, -1);
    Jim_IncrRefCount(nameObjPtr);
    int ret = Jim_GetCommandInfo(interp, nameObjPtr, cmdInfo);
    Jim_DecrRefCount(interp, nameObjPtr);
    return ret;
}

__device__ int Jim_GetCommandInfo(Jim_Interp *interp, Jim_Obj *objPtr, Jim_CmdInfo *cmdInfo)
{
    Jim_Cmd *cmdPtr;
    if ((cmdPtr = Jim_GetCommand(interp, objPtr, JIM_ERRMSG)) == NULL) return 0;
    if (cmdPtr->isproc) { Jim_SetResultFormatted(interp, "command \"%#s\" is a procedure", objPtr); return 0; }
    cmdInfo->objProc = cmdPtr->u.native.cmdProc;
    cmdInfo->objClientData = cmdPtr->u.native.privData;
    cmdInfo->deleteProc = (void *)cmdPtr->u.native.delProc;
    return 1;
}

__device__ int Jim_SetCommandInfoStr(Jim_Interp *interp, const char *name, Jim_CmdInfo *cmdInfo)
{
    Jim_Obj *nameObjPtr = Jim_NewStringObj(interp, name, -1);
    Jim_IncrRefCount(nameObjPtr);
    int ret = Jim_SetCommandInfo(interp, nameObjPtr, cmdInfo);
    Jim_DecrRefCount(interp, nameObjPtr);
    return ret;
}

__device__ int Jim_SetCommandInfo(Jim_Interp *interp, Jim_Obj *objPtr, Jim_CmdInfo *cmdInfo)
{
    Jim_Cmd *cmdPtr;
    if ((cmdPtr = Jim_GetCommand(interp, objPtr, JIM_ERRMSG)) == NULL) return 0;
    if (cmdPtr->isproc) { Jim_SetResultFormatted(interp, "command \"%#s\" is a procedure", objPtr); return 0; }
    cmdPtr->u.native.cmdProc = cmdInfo->objProc;
    cmdPtr->u.native.privData = cmdInfo->objClientData;
    cmdPtr->u.native.delProc = (Jim_DelCmdProc *)cmdInfo->deleteProc;
    return 1;
}

#pragma endregion

// -----------------------------------------------------------------------------
// Variable Command *Added*
// -----------------------------------------------------------------------------
#pragma region Variable Command *Added*

__device__ Jim_Obj *Jim_GetVar2(Jim_Interp *interp, const char *name, const char *key, int flags)
{
    Jim_CallFrame *savedFramePtr;
    int global = (flags & JIMGLOBAL_);
    if (global) {
        savedFramePtr = interp->framePtr;
        interp->framePtr = interp->topFramePtr;
    }
    Jim_Obj *nameObjPtr = Jim_NewStringObj(interp, name, -1); // build the variable name from 'name', not 'key' (cf. Jim_SetVar2 below)
    Jim_Obj *keyObjPtr = Jim_NewStringObj(interp, key, -1);
    Jim_IncrRefCount(nameObjPtr);
    Jim_IncrRefCount(keyObjPtr);
    Jim_Obj *obj;
    Jim_DictKeysVector(interp, nameObjPtr, &keyObjPtr, 1, &obj, flags);
    Jim_DecrRefCount(interp, keyObjPtr);
    Jim_DecrRefCount(interp, nameObjPtr);
    if (global) interp->framePtr = savedFramePtr;
    return obj;
}
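// Note (illustrative, comment only): these *Var2 helpers address a single key of a
// dict-valued variable, so Jim_SetVar2(interp, "opts", "color", "red", 0) behaves
// roughly like the script [dict set opts color red], and Jim_GetVar2(interp, "opts",
// "color", 0) reads the same slot back.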
__device__ int Jim_SetVar2(Jim_Interp *interp, const char *name, const char *key, const char *val, int flags)
{
    Jim_CallFrame *savedFramePtr;
    int global = (flags & JIMGLOBAL_);
    if (global) {
        savedFramePtr = interp->framePtr;
        interp->framePtr = interp->topFramePtr;
    }
    Jim_Obj *nameObjPtr = Jim_NewStringObj(interp, name, -1);
    Jim_Obj *keyObjPtr = Jim_NewStringObj(interp, key, -1);
    Jim_Obj *valObjPtr = Jim_NewStringObj(interp, val, -1);
    Jim_IncrRefCount(nameObjPtr);
    Jim_IncrRefCount(keyObjPtr);
    Jim_IncrRefCount(valObjPtr);
    int ret = Jim_SetDictKeysVector(interp, nameObjPtr, &keyObjPtr, 1, valObjPtr, 0);
    Jim_DecrRefCount(interp, valObjPtr);
    Jim_DecrRefCount(interp, keyObjPtr);
    Jim_DecrRefCount(interp, nameObjPtr);
    if (global) interp->framePtr = savedFramePtr;
    return ret;
}

#pragma endregion

// stubs
#ifndef jim_ext_package
__device__ int Jim_PackageProvide(Jim_Interp *interp, const char *name, const char *ver, int flags)
{
    return JIM_OK;
}
#endif
#ifndef jim_ext_aio
__device__ FILE *Jim_AioFilehandle(Jim_Interp *interp, Jim_Obj *fhObj)
{
    Jim_SetResultString(interp, "aio not enabled", -1);
    return NULL;
}
__device__ int Jim_MakeTempFile(Jim_Interp *interp, const char *template_)
{
    Jim_SetResultString(interp, "platform has no tempfile support", -1);
    return -1;
}
#endif

#include "jim-eventloop.h"
__device__ int Jim_InitStaticExtensions(Jim_Interp *interp)
{
    extern __device__ int Jim_bootstrapInit(Jim_Interp *interp);
    extern __device__ int Jim_globInit(Jim_Interp *interp);
    extern __device__ int Jim_stdlibInit(Jim_Interp *interp);
    extern __device__ int Jim_tclcompatInit(Jim_Interp *interp);
    //
    extern __device__ int Jim_aioInit(Jim_Interp *interp);
    extern __device__ int Jim_arrayInit(Jim_Interp *interp);
    extern __device__ int Jim_clockInit(Jim_Interp *interp);
    extern __device__ int Jim_execInit(Jim_Interp *interp);
    extern __device__ int Jim_fileInit(Jim_Interp *interp);
    extern __device__ int Jim_readdirInit(Jim_Interp *interp);
    extern __device__ int Jim_regexpInit(Jim_Interp *interp);
    //
#if __HIPCC__
    extern __device__ int Jim_gpuInit(Jim_Interp *interp);
#else
    extern __device__ int Jim_win32Init(Jim_Interp *interp);
#endif
    extern __device__ int Jim_historyInit(Jim_Interp *interp);
    extern __device__ int Jim_loadInit(Jim_Interp *interp);
    extern __device__ int Jim_namespaceInit(Jim_Interp *interp);
    extern __device__ int Jim_packInit(Jim_Interp *interp);
    extern __device__ int Jim_packageInit(Jim_Interp *interp);
    //extern __device__ int Jim_tclprefixInit(Jim_Interp *interp);
    Jim_bootstrapInit(interp);
    Jim_globInit(interp);
    Jim_stdlibInit(interp);
    //Jim_tclcompatInit(interp);
    //
    Jim_aioInit(interp);
    Jim_arrayInit(interp);
    Jim_clockInit(interp);
    Jim_eventloopInit(interp);
    Jim_execInit(interp);
    Jim_fileInit(interp);
    Jim_readdirInit(interp);
    Jim_regexpInit(interp);
    //
#if __HIPCC__
    Jim_gpuInit(interp);
#else
    //Jim_win32Init(interp);
#endif
#ifndef __HIPCC__
    Jim_historyInit(interp);
    Jim_loadInit(interp);
    Jim_namespaceInit(interp);
    Jim_packInit(interp);
    Jim_packageInit(interp);
    //Jim_tclprefixInit(interp);
#endif
    return JIM_OK;
}
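// Example (illustrative sketch, excluded from the build): the usual embedding
// sequence for this interpreter, mirroring the initialisation order used above.
#if 0
__device__ void ExampleEmbed()
{
    Jim_Interp *interp = Jim_CreateInterp();
    Jim_RegisterCoreCommands(interp);   // install the command table above
    Jim_InitStaticExtensions(interp);   // statically linked extensions
    if (Jim_Eval(interp, "expr {6 * 7}") == JIM_OK) {
        // Jim_GetResult(interp) now holds "42"
    }
    Jim_FreeInterp(interp);
}
#endif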
34e8a81cd460aa150d1c9b4c1b72e1ae865603d5.cu
/* Jim - A small embeddable Tcl interpreter
 *
 * Copyright 2005 Salvatore Sanfilippo <[email protected]>
 * Copyright 2005 Clemens Hintze <[email protected]>
 * Copyright 2005 patthoyts - Pat Thoyts <[email protected]>
 * Copyright 2008,2009 oharboe - Øyvind Harboe - [email protected]
 * Copyright 2008 Andrew Lunn <[email protected]>
 * Copyright 2008 Duane Ellis <[email protected]>
 * Copyright 2008 Uwe Klein <[email protected]>
 * Copyright 2008 Steve Bennett <[email protected]>
 * Copyright 2009 Nico Coesel <[email protected]>
 * Copyright 2009 Zachary T Welch [email protected]
 * Copyright 2009 David Brownell
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE JIM TCL PROJECT ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * JIM TCL PROJECT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation
 * are those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the Jim Tcl Project.
 **/

#pragma region Preamble

#define JIM_OPTIMIZATION // comment to avoid optimizations and reduce size

//#include <stdiocu.h>
#include <stringcu.h>
//#include <stdargcu.h>
//#include <ctypecu.h>
//#include <limitscu.h>
#include <errnocu.h>
#include <stdlibcu.h>
#include <setjmpcu.h>
#include <assert.h>
#include <timecu.h>
#include "jim.h"
#include "jimautoconf.h"
#include "utf8.h"

#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#ifdef HAVE_BACKTRACE
#include <execinfo.h>
#endif
#ifdef HAVE_CRT_EXTERNS_H
#include <crt_externs.h>
#endif

// For INFINITY, even if math functions are not enabled
#include <math.h>

// We may decide to switch to using $[...] after all, so leave it as an option
/*#define EXPRSUGAR_BRACKET*/

// For the no-autoconf case
#ifndef TCL_LIBRARY
#define TCL_LIBRARY "."
#endif
#ifndef TCL_PLATFORM_OS
#define TCL_PLATFORM_OS "unknown"
#endif
#ifndef TCL_PLATFORM_PLATFORM
#define TCL_PLATFORM_PLATFORM "unknown"
#endif
#ifndef TCL_PLATFORM_PATH_SEPARATOR
#define TCL_PLATFORM_PATH_SEPARATOR ":"
#endif

// GPUEX@begin: turn these back off
//#define DEBUG_SHOW_SCRIPT
//#define DEBUG_SHOW_SCRIPT_TOKENS
//#define DEBUG_SHOW_SUBST
//#define DEBUG_SHOW_EXPR
//#define DEBUG_SHOW_EXPR_TOKENS
//#define JIM_DEBUG_GC
// GPUEX@end

#ifdef JIM_MAINTAINER
#define JIM_DEBUG_COMMAND
#define JIM_DEBUG_PANIC
#endif

// Enable this (in conjunction with valgrind) to help debug reference counting issues
/*#define JIM_DISABLE_OBJECT_POOL*/

// Maximum size of an integer
#define JIM_INTEGER_SPACE 24

__device__ const char *jim_tt_name(int type);

#ifdef JIM_DEBUG_PANIC
static __device__ void JimPanicDump(int condition, const char *fmt, ...);
#define JimPanic JimPanicDump //(c, msg, ...) JimPanicDump(c, msg, __VA_ARGS__)
#else
#define JimPanic(c, msg, ...)
#endif

#pragma endregion

// -----------------------------------------------------------------------------
// Global variables
// -----------------------------------------------------------------------------
#pragma region Global variables

// A shared empty string for the objects string representation. Jim_InvalidateStringRep knows about it and doesn't try to free it.
__constant__ static char JimEmptyStringRep[] = "";

#pragma endregion

// -----------------------------------------------------------------------------
// Required prototypes of not exported functions
// -----------------------------------------------------------------------------
#pragma region Required prototypes of not exported functions

static __device__ void JimFreeCallFrame(Jim_Interp *interp, Jim_CallFrame *cf, int action);
static __device__ int ListSetIndex(Jim_Interp *interp, Jim_Obj *listPtr, int listindex, Jim_Obj *newObjPtr, int flags);
static __device__ int JimDeleteLocalProcs(Jim_Interp *interp, Jim_Stack *localCommands);
static __device__ Jim_Obj *JimExpandDictSugar(Jim_Interp *interp, Jim_Obj *objPtr);
static __device__ void SetDictSubstFromAny(Jim_Interp *interp, Jim_Obj *objPtr);
static __device__ Jim_Obj **JimDictPairs(Jim_Obj *dictPtr, int *len);
static __device__ void JimSetFailedEnumResult(Jim_Interp *interp, const char *arg, const char *badtype, const char *prefix, const char *const *tablePtr, const char *name);
static __device__ int JimCallProcedure(Jim_Interp *interp, Jim_Cmd *cmd, int argc, Jim_Obj *const *argv);
static __device__ int JimGetWideNoErr(Jim_Interp *interp, Jim_Obj *objPtr, jim_wide* widePtr);
static __device__ int JimSign(jim_wide w);
static __device__ int JimValidName(Jim_Interp *interp, const char *type, Jim_Obj *nameObjPtr);
static __device__ void JimPrngSeed(Jim_Interp *interp, unsigned char *seed, int seedLen);
static __device__ void JimRandomBytes(Jim_Interp *interp, void *dest, unsigned int len);

// Fast access to the int (wide) value of an object which is known to be of int type
#define JimWideValue(objPtr) (objPtr)->internalRep.wideValue

#define JimObjTypeName(O) ((O)->typePtr ? (O)->typePtr->name : "none")

static __device__ int utf8_tounicode_case(const char *s, int *uc, int upper)
{
    int l = utf8_tounicode(s, uc);
    if (upper) {
        *uc = utf8_upper(*uc);
    }
    return l;
}

// These can be used in addition to JIM_CASESENS/JIM_NOCASE
#define JIM_CHARSET_SCAN 2
#define JIM_CHARSET_GLOB 0

// pattern points to a string like "[^a-z\ub5]"
// The pattern may contain trailing chars, which are ignored.
// The pattern is matched against unicode char 'c'.
// If (flags & JIM_NOCASE), case is ignored when matching.
// If (flags & JIM_CHARSET_SCAN), it considers ^ and ] special at the start of the charset, per scan, rather than glob/string match.
// If the unicode char 'c' matches that set, returns a pointer to the ']' character, or the null character if the ']' is missing.
// Returns NULL on no match.
static __device__ const char *JimCharsetMatch(const char *pattern, int c, int flags)
{
    int not_ = 0;
    int pchar;
    int match = 0;
    int nocase = 0;
    if (flags & JIM_NOCASE) {
        nocase++;
        c = utf8_upper(c);
    }
    if (flags & JIM_CHARSET_SCAN) {
        if (*pattern == '^') {
            not_++;
            pattern++;
        }
        // Special case. If the first char is ']', it is part of the set
        if (*pattern == ']') goto first;
    }
    while (*pattern && *pattern != ']') {
        // Exact match
        if (pattern[0] == '\\') {
first:
            pattern += utf8_tounicode_case(pattern, &pchar, nocase);
        }
        else {
            // Is this a range? a-z
            int start;
            int end;
            pattern += utf8_tounicode_case(pattern, &start, nocase);
            if (pattern[0] == '-' && pattern[1]) {
                // skip '-'
                pattern += utf8_tounicode(pattern, &pchar);
                pattern += utf8_tounicode_case(pattern, &end, nocase);
                // Handle reversed range too
                if ((c >= start && c <= end) || (c >= end && c <= start)) match = 1;
                continue;
            }
            pchar = start;
        }
        if (pchar == c) match = 1;
    }
    if (not_) match = !match;
    return (match ? pattern : nullptr);
}

// Glob-style pattern matching.
// Note: string *must* be valid UTF-8 sequences
static __device__ int JimGlobMatch(const char *pattern, const char *string, int nocase)
{
    int c;
    int pchar;
    while (*pattern) {
        switch (pattern[0]) {
        case '*':
            while (pattern[1] == '*') pattern++;
            pattern++;
            if (!pattern[0]) return 1; // match
            while (*string) {
                // Recursive call - Does the remaining pattern match anywhere?
                if (JimGlobMatch(pattern, string, nocase)) return 1; // match
                string += utf8_tounicode(string, &c);
            }
            return 0; // no match
        case '?':
            string += utf8_tounicode(string, &c);
            break;
        case '[': {
            string += utf8_tounicode(string, &c);
            pattern = JimCharsetMatch(pattern + 1, c, nocase ? JIM_NOCASE : 0);
            if (!pattern) return 0;
            if (!*pattern) continue; // Ran out of pattern (no ']')
            break;
        }
        case '\\':
            if (pattern[1]) pattern++;
            // fall through
        default:
            string += utf8_tounicode_case(string, &c, nocase);
            utf8_tounicode_case(pattern, &pchar, nocase);
            if (pchar != c) return 0;
            break;
        }
        pattern += utf8_tounicode_case(pattern, &pchar, nocase);
        if (!*string) {
            while (*pattern == '*') pattern++;
            break;
        }
    }
    return (!*pattern && !*string ? 1 : 0);
}

// string comparison. Works on binary data.
// Returns -1, 0 or 1
// Note that the lengths are byte lengths, not char lengths.
static __device__ int JimStringCompare(const char *s1, int l1, const char *s2, int l2)
{
    if (l1 < l2) return (memcmp(s1, s2, l1) <= 0 ? -1 : 1);
    else if (l2 < l1) return (memcmp(s1, s2, l2) >= 0 ? 1 : -1);
    else return JimSign(memcmp(s1, s2, l1));
}
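// Worked examples for the matchers above (comment only):
//   JimGlobMatch("f[a-c]?.tcl", "fbx.tcl", 0) == 1   ('[a-c]' matches 'b', '?' matches 'x')
//   JimGlobMatch("a*", "ba", 0) == 0
//   JimCharsetMatch("a-z]", 'q', 0) returns a pointer to the ']' (a match)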
// Compare null terminated strings, up to a maximum of 'maxchars' characters, (or end of string if 'maxchars' is -1).
// Returns -1, 0, 1 for s1 < s2, s1 == s2, s1 > s2 respectively.
// Note: does not support embedded nulls.
static __device__ int JimStringCompareLen(const char *s1, const char *s2, int maxchars, int nocase)
{
    while (*s1 && *s2 && maxchars) {
        int c1, c2;
        s1 += utf8_tounicode_case(s1, &c1, nocase);
        s2 += utf8_tounicode_case(s2, &c2, nocase);
        if (c1 != c2) return JimSign(c1 - c2);
        maxchars--;
    }
    if (!maxchars) return 0;
    // One string or both terminated
    if (*s1) return 1;
    if (*s2) return -1;
    return 0;
}

// Search 's1' inside 's2', starting to search from char 'index' of 's2'. The index of the first occurrence of s1 in s2 is returned.
// If s1 is not found inside s2, -1 is returned.
static __device__ int JimStringFirst(const char *s1, int l1, const char *s2, int l2, int idx)
{
    if (!l1 || !l2 || l1 > l2) return -1;
    if (idx < 0) idx = 0;
    s2 += utf8_index(s2, idx);
    int l1bytelen = utf8_index(s1, l1);
    for (int i = idx; i <= l2 - l1; i++) {
        if (!memcmp(s2, s1, l1bytelen)) return i;
        int c; UNUSED_SYMBOL(c);
        s2 += utf8_tounicode(s2, &c);
    }
    return -1;
}

// Note: Lengths and return value are in bytes, not chars.
static __device__ int JimStringLast(const char *s1, int l1, const char *s2, int l2)
{
    if (!l1 || !l2 || l1 > l2) return -1;
    // Now search for the needle
    for (const char *p = s2 + l2 - 1; p != s2 - 1; p--)
        if (*p == *s1 && !memcmp(s1, p, l1)) return (int)(p - s2);
    return -1;
}

#ifdef JIM_UTF8
// Note: Lengths and return value are in chars.
static __device__ int JimStringLastUtf8(const char *s1, int l1, const char *s2, int l2)
{
    int n = JimStringLast(s1, utf8_index(s1, l1), s2, utf8_index(s2, l2));
    if (n > 0) n = utf8_strlen(s2, n);
    return n;
}
#endif

// After an strtol()/strtod()-like conversion, check whether something was converted and that the only thing left is white space.
// Returns JIM_OK or JIM_ERROR.
static __device__ int JimCheckConversion(const char *str, const char *endptr)
{
    if (str[0] == '\0' || str == endptr) return JIM_ERROR;
    if (endptr[0] != '\0') {
        while (*endptr) {
            if (!isspace((unsigned char)*endptr)) return JIM_ERROR;
            endptr++;
        }
    }
    return JIM_OK;
}

// Parses the front of a number to determine its sign and base
// Returns the index to start parsing according to the given base
static __device__ int JimNumberBase(const char *str, int *base, int *sign)
{
    int i = 0;
    *base = 10;
    while (isspace(str[i])) i++;
    if (str[i] == '-') {
        *sign = -1;
        i++;
    }
    else {
        if (str[i] == '+') {
            i++;
        }
        *sign = 1;
    }
    if (str[i] != '0') return 0; // base 10
    // We have 0<x>, so see if we can convert it
    switch (str[i + 1]) {
    case 'x': case 'X': *base = 16; break;
    case 'o': case 'O': *base = 8; break;
    case 'b': case 'B': *base = 2; break;
    default: return 0;
    }
    i += 2;
    // Ensure that (e.g.) 0x-5 fails to parse
    if (str[i] != '-' && str[i] != '+' && !isspace(str[i]))
        // Parse according to this base
        return i;
    // Parse as base 10
    *base = 10;
    return 0;
}

// Converts a number as per strtol(..., 0) except leading zeros do *not* imply octal. Instead, decimal is assumed unless the number begins with 0x, 0o or 0b
static __device__ long jim_strtol(const char *str, char **endptr)
{
    int sign;
    int base;
    int i = JimNumberBase(str, &base, &sign);
    if (base != 10) {
        long value = strtol(str + i, endptr, base);
        if (endptr == NULL || *endptr != str + i) return value * sign;
    }
    // Can just do a regular base-10 conversion
    return strtol(str, endptr, 10);
}
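// Worked examples for jim_strtol (comment only): a leading zero is still decimal,
// so jim_strtol("017", &end) == 17, while explicit prefixes behave as expected:
// jim_strtol("0x17", &end) == 23 and jim_strtol("-0b101", &end) == -5.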
// Converts a number as per strtoull(..., 0) except leading zeros do *not* imply octal. Instead, decimal is assumed unless the number begins with 0x, 0o or 0b
static __device__ jim_wide jim_strtoull(const char *str, char **endptr)
{
    if (!strcmp(str, "true")) {
        *endptr = (char *)(str + 4);
        return 1;
    }
    if (!strcmp(str, "false")) {
        *endptr = (char *)(str + 5);
        return 0;
    }
#ifdef HAVE_LONG_LONG
    int sign;
    int base;
    int i = JimNumberBase(str, &base, &sign);
    if (base != 10) {
        jim_wide value = strtoull(str + i, endptr, base);
        if (endptr == NULL || *endptr != str + i) return value * sign;
    }
    // Can just do a regular base-10 conversion
    return strtoull(str, endptr, 10);
#else
    return (unsigned long)jim_strtol(str, endptr);
#endif
}

__device__ int Jim_StringToWide(const char *str, jim_wide *widePtr, int base)
{
    char *endptr;
    *widePtr = (base ? strtoull(str, &endptr, base) : jim_strtoull(str, &endptr));
    return JimCheckConversion(str, endptr);
}

__device__ int Jim_StringToDouble(const char *str, double *doublePtr)
{
    char *endptr;
    // Callers can check for underflow via ERANGE
    errno = 0;
    *doublePtr = strtod(str, &endptr);
    return JimCheckConversion(str, endptr);
}

static __device__ jim_wide JimPowWide(jim_wide b, jim_wide e)
{
    jim_wide i, res = 1;
    if ((b == 0 && e != 0) || e < 0) return 0;
    for (i = 0; i < e; i++) res *= b;
    return res;
}

#pragma endregion

// -----------------------------------------------------------------------------
// Special functions
// -----------------------------------------------------------------------------
#pragma region Special functions

#ifdef JIM_DEBUG_PANIC
static __device__ void JimPanicDump(int condition, const char *fmt, ...)
{
    if (!condition) return;
    va_list va;
    va_start(va, fmt);
    fprintf_(stderr, "\nJIM INTERPRETER PANIC: ");
    vfprintf(stderr, fmt, va);
    fprintf_(stderr, "\n\n");
#ifdef HAVE_BACKTRACE
    {
        void *array[40];
        int size, i;
        char **strings;
        size = backtrace(array, 40);
        strings = backtrace_symbols(array, size);
        for (i = 0; i < size; i++) fprintf(stderr, "[backtrace] %s\n", strings[i]);
        fprintf(stderr, "[backtrace] Include the above lines and the output\n");
        fprintf(stderr, "[backtrace] of 'nm <executable>' in the bug report.\n");
    }
#endif
    va_end(va);
    exit(1);
}
#endif

#pragma endregion

// -----------------------------------------------------------------------------
// Memory allocation
// -----------------------------------------------------------------------------
#pragma region Memory allocation
__device__ void *Jim_Alloc(int size)
{
    return (size ? malloc(size) : nullptr);
}

__device__ void Jim_Free(void *ptr)
{
    free(ptr);
}

__device__ void *Jim_Realloc(void *ptr, int size)
{
    return realloc(ptr, size);
}

__device__ char *Jim_StrDup(const char *s)
{
    return strdup(s);
}

__device__ char *Jim_StrDupLen(const char *s, int l)
{
    char *copy = (char *)Jim_Alloc(l + 1);
    memcpy(copy, s, l + 1);
    copy[l] = 0; // Just to be sure, original could be substring
    return copy;
}

#pragma endregion

// -----------------------------------------------------------------------------
// Time related functions
// -----------------------------------------------------------------------------
#pragma region Time related functions

// Returns current time in microseconds
static __device__ jim_wide JimClock()
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (jim_wide)(tv.tv_sec * 1000000 + tv.tv_usec);
}

#pragma endregion

// -----------------------------------------------------------------------------
// Hash Tables
// -----------------------------------------------------------------------------
#pragma region Hash Tables

// -------------------------- private prototypes ----------------------------
static __device__ void JimExpandHashTableIfNeeded(Jim_HashTable *ht);
static __device__ unsigned int JimHashTableNextPower(unsigned int size);
static __device__ Jim_HashEntry *JimInsertHashEntry(Jim_HashTable *ht, const void *key, int replace);

// -------------------------- hash functions --------------------------------

// Thomas Wang's 32 bit Mix Function
__device__ unsigned int Jim_IntHashFunction(unsigned int key)
{
    key += ~(key << 15);
    key ^= (key >> 10);
    key += (key << 3);
    key ^= (key >> 6);
    key += ~(key << 11);
    key ^= (key >> 16);
    return key;
}

// Generic hash function (we multiply by 9 and add each byte, as Tcl does)
__device__ unsigned int Jim_GenHashFunction(const unsigned char *buf, int len)
{
    unsigned int h = 0;
    while (len--) h += (h << 3) + *buf++;
    return h;
}

// ----------------------------- API implementation -------------------------

// reset a hashtable already initialized
static __device__ void JimResetHashTable(Jim_HashTable *ht)
{
    ht->table = NULL;
    ht->size = 0;
    ht->sizemask = 0;
    ht->used = 0;
    ht->collisions = 0;
#ifdef JIM_RANDOMISE_HASH
    // This is initialised to a random value to avoid a hash collision attack.
    // See: n.runs-SA-2011.004
    ht->uniq = (rand() ^ time(NULL) ^ clock());
#else
    ht->uniq = 0;
#endif
}

static __device__ void JimInitHashTableIterator(Jim_HashTable *ht, Jim_HashTableIterator *iter)
{
    iter->ht = ht;
    iter->index = -1;
    iter->entry = NULL;
    iter->nextEntry = NULL;
}

// Initialize the hash table
__device__ int Jim_InitHashTable(Jim_HashTable *ht, const Jim_HashTableType *type, void *privDataPtr)
{
    JimResetHashTable(ht);
    ht->type = type;
    ht->privdata = privDataPtr;
    return JIM_OK;
}

// Resize the table to the minimal size that contains all the elements, but with the invariant of a USED/BUCKETS ratio near to <= 1
__device__ void Jim_ResizeHashTable(Jim_HashTable *ht)
{
    int minimal = ht->used;
    if (minimal < JIM_HT_INITIAL_SIZE) minimal = JIM_HT_INITIAL_SIZE;
    Jim_ExpandHashTable(ht, minimal);
}

// Expand or create the hashtable
__device__ void Jim_ExpandHashTable(Jim_HashTable *ht, unsigned int size)
{
    Jim_HashTable n; // the new hashtable
    unsigned int realsize = JimHashTableNextPower(size), i;
    // the size is invalid if it is smaller than the number of elements already inside the hashtable
    if (size <= ht->used) return;
    Jim_InitHashTable(&n, ht->type, ht->privdata);
    n.size = realsize;
    n.sizemask = realsize - 1;
    n.table = (Jim_HashEntry **)Jim_Alloc(realsize * sizeof(Jim_HashEntry *));
    // Keep the same 'uniq' as the original
    n.uniq = ht->uniq;
    // Initialize all the pointers to NULL
    memset(n.table, 0, realsize * sizeof(Jim_HashEntry *));
    // Copy all the elements from the old to the new table: note that if the old hash table is empty ht->used is zero, so Jim_ExpandHashTable just creates an empty hash table.
    n.used = ht->used;
    for (i = 0; ht->used > 0; i++) {
        Jim_HashEntry *he, *nextHe;
        if (ht->table[i] == NULL) continue;
        // For each hash entry on this slot...
        he = ht->table[i];
        while (he) {
            unsigned int h;
            nextHe = he->next;
            // Get the new element index
            h = Jim_HashKey(ht, he->key) & n.sizemask;
            he->next = n.table[h];
            n.table[h] = he;
            ht->used--;
            // Pass to the next element
            he = nextHe;
        }
    }
    assert(ht->used == 0);
    Jim_Free(ht->table);
    // Remap the new hashtable in the old
    *ht = n;
}

// Add an element to the target hash table
__device__ int Jim_AddHashEntry(Jim_HashTable *ht, const void *key, void *val)
{
    Jim_HashEntry *entry;
    // Get the new entry, or NULL if the element already exists.
    entry = JimInsertHashEntry(ht, key, 0);
    if (entry == NULL) return JIM_ERROR;
    // Set the hash entry fields.
    Jim_SetHashKey(ht, entry, key);
    Jim_SetHashVal(ht, entry, val);
    return JIM_OK;
}

// Add an element, discarding the old if the key already exists
__device__ int Jim_ReplaceHashEntry(Jim_HashTable *ht, const void *key, void *val)
{
    int existed;
    Jim_HashEntry *entry;
    // Get the entry for the key (an existing one if the key is already present).
    entry = JimInsertHashEntry(ht, key, 1);
    if (entry->key) {
        // It already exists, so only replace the value. Note if both a destructor and a duplicate function exist,
        // need to dup before destroy.
        // perhaps they are the same reference counted object
        if (ht->type->valDestructor && ht->type->valDup) {
            void *newval = ht->type->valDup(ht->privdata, val);
            ht->type->valDestructor(ht->privdata, entry->u.val);
            entry->u.val = newval;
        }
        else {
            Jim_FreeEntryVal(ht, entry);
            Jim_SetHashVal(ht, entry, val);
        }
        existed = 1;
    }
    else {
        // Doesn't exist, so set the key
        Jim_SetHashKey(ht, entry, key);
        Jim_SetHashVal(ht, entry, val);
        existed = 0;
    }
    return existed;
}

// Search and remove an element
__device__ int Jim_DeleteHashEntry(Jim_HashTable *ht, const void *key)
{
    unsigned int h;
    Jim_HashEntry *he, *prevHe;
    if (ht->used == 0) return JIM_ERROR;
    h = Jim_HashKey(ht, key) & ht->sizemask;
    he = ht->table[h];
    prevHe = NULL;
    while (he) {
        if (Jim_CompareHashKeys(ht, key, he->key)) {
            // Unlink the element from the list
            if (prevHe) prevHe->next = he->next;
            else ht->table[h] = he->next;
            Jim_FreeEntryKey(ht, he);
            Jim_FreeEntryVal(ht, he);
            Jim_Free(he);
            ht->used--;
            return JIM_OK;
        }
        prevHe = he;
        he = he->next;
    }
    return JIM_ERROR; // not found
}

// Destroy an entire hash table and leave it ready for reuse
__device__ int Jim_FreeHashTable(Jim_HashTable *ht)
{
    unsigned int i;
    // Free all the elements
    for (i = 0; ht->used > 0; i++) {
        Jim_HashEntry *he, *nextHe;
        if ((he = ht->table[i]) == NULL) continue;
        while (he) {
            nextHe = he->next;
            Jim_FreeEntryKey(ht, he);
            Jim_FreeEntryVal(ht, he);
            Jim_Free(he);
            ht->used--;
            he = nextHe;
        }
    }
    // Free the table and the allocated cache structure
    Jim_Free(ht->table);
    // Re-initialize the table
    JimResetHashTable(ht);
    return JIM_OK; // never fails
}

__device__ Jim_HashEntry *Jim_FindHashEntry(Jim_HashTable *ht, const void *key)
{
    Jim_HashEntry *he;
    unsigned int h;
    if (ht->used == 0) return NULL;
    h = Jim_HashKey(ht, key) & ht->sizemask;
    he = ht->table[h];
    while (he) {
        if (Jim_CompareHashKeys(ht, key, he->key)) return he;
        he = he->next;
    }
    return NULL;
}

__device__ Jim_HashTableIterator *Jim_GetHashTableIterator(Jim_HashTable *ht)
{
    Jim_HashTableIterator *iter = (Jim_HashTableIterator *)Jim_Alloc(sizeof(*iter));
    JimInitHashTableIterator(ht, iter);
    return iter;
}

__device__ Jim_HashEntry *Jim_NextHashEntry(Jim_HashTableIterator *iter)
{
    while (1) {
        if (iter->entry == NULL) {
            iter->index++;
            if (iter->index >= (signed)iter->ht->size) break;
            iter->entry = iter->ht->table[iter->index];
        }
        else iter->entry = iter->nextEntry;
        if (iter->entry) {
            // We need to save the 'next' here, the iterator user may delete the entry we are returning.
            iter->nextEntry = iter->entry->next;
            return iter->entry;
        }
    }
    return NULL;
}

// ------------------------- private functions ------------------------------

// Expand the hash table if needed
static __device__ void JimExpandHashTableIfNeeded(Jim_HashTable *ht)
{
    // If the hash table is empty expand it to the initial size, if the table is "full" double its size.
    if (ht->size == 0) Jim_ExpandHashTable(ht, JIM_HT_INITIAL_SIZE);
    if (ht->size == ht->used) Jim_ExpandHashTable(ht, ht->size * 2);
}

// Our hash table capability is a power of two
static __device__ unsigned int JimHashTableNextPower(unsigned int size)
{
    unsigned int i = JIM_HT_INITIAL_SIZE;
    if (size >= 2147483648U) return 2147483648U;
    while (1) {
        if (i >= size) return i;
        i *= 2;
    }
}
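// Example (illustrative sketch, excluded from the build): typical use of the hash
// table API above, with the string-copying table type defined further below.
#if 0
static __device__ void ExampleHashUse()
{
    Jim_HashTable ht;
    Jim_InitHashTable(&ht, &JimPackageHashTableType, NULL);
    Jim_AddHashEntry(&ht, "answer", (void *)"42");     // JIM_ERROR if the key exists
    Jim_ReplaceHashEntry(&ht, "answer", (void *)"43"); // replaces the value instead
    Jim_HashEntry *he = Jim_FindHashEntry(&ht, "answer");
    if (he) { /* the entry's value is now "43" */ }
    Jim_FreeHashTable(&ht);
}
#endif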
// Returns a free entry that can be populated with a hash entry for the given 'key'.
// If the key already exists, NULL is returned (or the existing entry, if 'replace' is set).
static __device__ Jim_HashEntry *JimInsertHashEntry(Jim_HashTable *ht, const void *key, int replace)
{
    // Expand the hashtable if needed
    JimExpandHashTableIfNeeded(ht);
    // Compute the key hash value
    unsigned int h = Jim_HashKey(ht, key) & ht->sizemask;
    // Search if this slot does not already contain the given key
    Jim_HashEntry *he = ht->table[h];
    while (he) {
        if (Jim_CompareHashKeys(ht, key, he->key)) return (replace ? he : NULL);
        he = he->next;
    }
    // Allocates the memory and stores key
    he = (Jim_HashEntry *)Jim_Alloc(sizeof(*he));
    he->next = ht->table[h];
    ht->table[h] = he;
    ht->used++;
    he->key = NULL;
    return he;
}

// ----------------------- StringCopy Hash Table Type ------------------------

static __device__ unsigned int JimStringCopyHTHashFunction(const void *key)
{
    return Jim_GenHashFunction((const unsigned char *)key, (int)strlen((const char *)key));
}

static __device__ void *JimStringCopyHTDup(void *privdata, const void *key)
{
    return Jim_StrDup((const char *)key);
}

static __device__ int JimStringCopyHTKeyCompare(void *privdata, const void *key1, const void *key2)
{
    return !strcmp((const char *)key1, (const char *)key2);
}

static __device__ void JimStringCopyHTKeyDestructor(void *privdata, void *key)
{
    Jim_Free(key);
}

static __device__ const Jim_HashTableType JimPackageHashTableType = {
    JimStringCopyHTHashFunction,  // hash function
    JimStringCopyHTDup,           // key dup
    NULL,                         // val dup
    JimStringCopyHTKeyCompare,    // key compare
    JimStringCopyHTKeyDestructor, // key destructor
    NULL                          // val destructor
};

typedef struct AssocDataValue {
    Jim_InterpDeleteProc *delProc;
    void *data;
} AssocDataValue;

static __device__ void JimAssocDataHashTableValueDestructor(void *privdata, void *data)
{
    AssocDataValue *assocPtr = (AssocDataValue *)data;
    if (assocPtr->delProc != NULL) assocPtr->delProc((Jim_Interp *)privdata, assocPtr->data);
    Jim_Free(data);
}

__constant__ static const Jim_HashTableType JimAssocDataHashTableType = {
    JimStringCopyHTHashFunction,         // hash function
    JimStringCopyHTDup,                  // key dup
    NULL,                                // val dup
    JimStringCopyHTKeyCompare,           // key compare
    JimStringCopyHTKeyDestructor,        // key destructor
    JimAssocDataHashTableValueDestructor // val destructor
};

#pragma endregion

// -----------------------------------------------------------------------------
// Stack - This is a simple generic stack implementation. It is used for example in the 'expr' expression compiler.
// -----------------------------------------------------------------------------
#pragma region Stack

__device__ void Jim_InitStack(Jim_Stack *stack)
{
    stack->len = 0;
    stack->maxlen = 0;
    stack->vector = NULL;
}

__device__ void Jim_FreeStack(Jim_Stack *stack)
{
    Jim_Free(stack->vector);
}

__device__ int Jim_StackLen(Jim_Stack *stack)
{
    return stack->len;
}

__device__ void Jim_StackPush(Jim_Stack *stack, void *element)
{
    int neededLen = stack->len + 1;
    if (neededLen > stack->maxlen) {
        stack->maxlen = (neededLen < 20 ?
            20 : neededLen * 2);
        stack->vector = (void **)Jim_Realloc(stack->vector, sizeof(void *) * stack->maxlen);
    }
    stack->vector[stack->len] = element;
    stack->len++;
}

__device__ void *Jim_StackPop(Jim_Stack *stack)
{
    if (stack->len == 0) return NULL;
    stack->len--;
    return stack->vector[stack->len];
}

__device__ void *Jim_StackPeek(Jim_Stack *stack)
{
    if (stack->len == 0) return NULL;
    return stack->vector[stack->len - 1];
}

__device__ void Jim_FreeStackElements(Jim_Stack *stack, void (*freeFunc)(void *ptr))
{
    for (int i = 0; i < stack->len; i++) freeFunc(stack->vector[i]);
}

#pragma endregion

// -----------------------------------------------------------------------------
// Tcl Parser
// -----------------------------------------------------------------------------
#pragma region Tcl Parser

// Token types
#define JIM_TT_NONE 0       // No token returned
#define JIM_TT_STR 1        // simple string
#define JIM_TT_ESC 2        // string that needs escape chars conversion
#define JIM_TT_VAR 3        // var substitution
#define JIM_TT_DICTSUGAR 4  // Syntax sugar for [dict get], $foo(bar)
#define JIM_TT_CMD 5        // command substitution
// Note: Keep these three together for TOKEN_IS_SEP()
#define JIM_TT_SEP 6        // word separator (white space)
#define JIM_TT_EOL 7        // line separator
#define JIM_TT_EOF 8        // end of script
#define JIM_TT_LINE 9       // special 'start-of-line' token. arg is # of arguments to the command. -ve if {*}
#define JIM_TT_WORD 10      // special 'start-of-word' token. arg is # of tokens to combine. -ve if {*}
// Additional token types needed for expressions
#define JIM_TT_SUBEXPR_START 11
#define JIM_TT_SUBEXPR_END 12
#define JIM_TT_SUBEXPR_COMMA 13
#define JIM_TT_EXPR_INT 14
#define JIM_TT_EXPR_DOUBLE 15
#define JIM_TT_EXPRSUGAR 16 // $(expression)
// Operator token types start here
#define JIM_TT_EXPR_OP 20

#define TOKEN_IS_SEP(type) (type >= JIM_TT_SEP && type <= JIM_TT_EOF)

// Parser states
#define JIM_PS_DEF 0        // Default state
#define JIM_PS_QUOTE 1      // Inside ""
#define JIM_PS_DICTSUGAR 2  // Tokenising abc(def) into 4 separate tokens

// Results of missing quotes, braces, etc. from parsing.
struct JimParseMissing {
    int ch;   // At end of parse, ' ' if complete or '{', '[', '"', '\\' if incomplete
    int line; // Line number starting the missing token
};

// Parser context structure. The same context is used both to parse Tcl scripts and lists.
struct JimParserCtx {
    const char *p;    // Pointer to the point of the program we are parsing
    int len;          // Remaining length
    int linenr;       // Current line number
    const char *tstart;
    const char *tend; // Returned token is at tstart-tend in 'prg'.
    int tline;        // Line number of the returned token
    int tt;           // Token type
    int eof;          // Non zero if EOF condition is true.
    int state;        // Parser state
    int comment;      // Non zero if the next chars may be a comment.
    struct JimParseMissing missing; // Details of any missing quotes, etc.
};

static __device__ int JimParseScript(struct JimParserCtx *pc);
static __device__ int JimParseSep(struct JimParserCtx *pc);
static __device__ int JimParseEol(struct JimParserCtx *pc);
static __device__ int JimParseCmd(struct JimParserCtx *pc);
static __device__ int JimParseQuote(struct JimParserCtx *pc);
static __device__ int JimParseVar(struct JimParserCtx *pc);
static __device__ int JimParseBrace(struct JimParserCtx *pc);
static __device__ int JimParseStr(struct JimParserCtx *pc);
static __device__ int JimParseComment(struct JimParserCtx *pc);
static __device__ void JimParseSubCmd(struct JimParserCtx *pc);
static __device__ int JimParseSubQuote(struct JimParserCtx *pc);
static __device__ Jim_Obj *JimParserGetTokenObj(Jim_Interp *interp, struct JimParserCtx *pc);

// Initialize a parser context. 'prg' is a pointer to the program text, linenr is the line number of the first line contained in the program.
static __device__ void JimParserInit(struct JimParserCtx *pc, const char *prg, int len, int linenr)
{
    pc->p = prg;
    pc->len = len;
    pc->tstart = NULL;
    pc->tend = NULL;
    pc->tline = 0;
    pc->tt = JIM_TT_NONE;
    pc->eof = 0;
    pc->state = JIM_PS_DEF;
    pc->linenr = linenr;
    pc->comment = 1;
    pc->missing.ch = ' ';
    pc->missing.line = linenr;
}

static __device__ int JimParseScript(struct JimParserCtx *pc)
{
    while (1) {
        // the while is used to reiterate with continue if needed
        if (!pc->len) {
            pc->tstart = pc->p;
            pc->tend = pc->p - 1;
            pc->tline = pc->linenr;
            pc->tt = JIM_TT_EOL;
            pc->eof = 1;
            return JIM_OK;
        }
        switch (*(pc->p)) {
        case '\\':
            if (*(pc->p + 1) == '\n' && pc->state == JIM_PS_DEF) return JimParseSep(pc);
            pc->comment = 0;
            return JimParseStr(pc);
        case ' ':
        case '\t':
        case '\r':
        case '\f':
            if (pc->state == JIM_PS_DEF) return JimParseSep(pc);
            pc->comment = 0;
            return JimParseStr(pc);
        case '\n':
        case ';':
            pc->comment = 1;
            if (pc->state == JIM_PS_DEF) return JimParseEol(pc);
            return JimParseStr(pc);
        case '[':
            pc->comment = 0;
            return JimParseCmd(pc);
        case '$':
            pc->comment = 0;
            if (JimParseVar(pc) == JIM_ERROR) {
                // An orphan $. Create as a separate token
                pc->tstart = pc->tend = pc->p++;
                pc->len--;
                pc->tt = JIM_TT_ESC;
            }
            return JIM_OK;
        case '#':
            if (pc->comment) {
                JimParseComment(pc);
                continue;
            }
            return JimParseStr(pc);
        default:
            pc->comment = 0;
            return JimParseStr(pc);
        }
        //return JIM_OK; // unreached
    }
}

static __device__ int JimParseSep(struct JimParserCtx *pc)
{
    pc->tstart = pc->p;
    pc->tline = pc->linenr;
    while (isspace(*pc->p) || (*pc->p == '\\' && *(pc->p + 1) == '\n')) {
        if (*pc->p == '\n') break;
        if (*pc->p == '\\') {
            pc->p++; pc->len--;
            pc->linenr++;
        }
        pc->p++; pc->len--;
    }
    pc->tend = pc->p - 1;
    pc->tt = JIM_TT_SEP;
    return JIM_OK;
}

static __device__ int JimParseEol(struct JimParserCtx *pc)
{
    pc->tstart = pc->p;
    pc->tline = pc->linenr;
    while (isspace(*pc->p) || *pc->p == ';') {
        if (*pc->p == '\n') pc->linenr++;
        pc->p++; pc->len--;
    }
    pc->tend = pc->p - 1;
    pc->tt = JIM_TT_EOL;
    return JIM_OK;
}

// Here are the rules for parsing:
// {braced expression}
// - Count open and closing braces
// - Backslash escapes meaning of braces
//
// "quoted expression"
// - First double quote at start of word terminates the expression
// - Backslash escapes quote and bracket
// - [commands brackets] are counted/nested
// - command rules apply within [brackets], not quoting rules (i.e.
quotes have their own rules) // // [command expression] // - Count open and closing brackets // - Backslash escapes quote, bracket and brace // - [commands brackets] are counted/nested // - "quoted expressions" are parsed according to quoting rules // - {braced expressions} are parsed according to brace rules // // For everything, backslash escapes the next char, newline increments current line // Parses a braced expression starting at pc->p. // Positions the parser at the end of the braced expression, sets pc->tend and possibly pc->missing. static __device__ void JimParseSubBrace(struct JimParserCtx *pc) { int level = 1; // Skip the brace pc->p++; pc->len--; while (pc->len) { switch (*pc->p) { case '\\': if (pc->len > 1) { if (*++pc->p == '\n') pc->linenr++; pc->len--; } break; case '{': level++; break; case '}': if (--level == 0) { pc->tend = pc->p - 1; pc->p++; pc->len--; return; } break; case '\n': pc->linenr++; break; } pc->p++; pc->len--; } pc->missing.ch = '{'; pc->missing.line = pc->tline; pc->tend = pc->p - 1; } // Parses a quoted expression starting at pc->p. // Positions the parser at the end of the quoted expression, sets pc->tend and possibly pc->missing. // Returns the type of the token of the string, either JIM_TT_ESC (if it contains values which need to be [subst]ed) or JIM_TT_STR. static __device__ int JimParseSubQuote(struct JimParserCtx *pc) { int tt = JIM_TT_STR; int line = pc->tline; // Skip the quote pc->p++; pc->len--; while (pc->len) { switch (*pc->p) { case '\\': if (pc->len > 1) { if (*++pc->p == '\n') pc->linenr++; pc->len--; tt = JIM_TT_ESC; } break; case '"': pc->tend = pc->p - 1; pc->p++; pc->len--; return tt; case '[': JimParseSubCmd(pc); tt = JIM_TT_ESC; continue; case '\n': pc->linenr++; break; case '$': tt = JIM_TT_ESC; break; } pc->p++; pc->len--; } pc->missing.ch = '"'; pc->missing.line = line; pc->tend = pc->p - 1; return tt; } // Parses a [command] expression starting at pc->p. // Positions the parser at the end of the command expression, sets pc->tend and possibly pc->missing. static __device__ void JimParseSubCmd(struct JimParserCtx *pc) { int level = 1; int startofword = 1; int line = pc->tline; // Skip the bracket pc->p++; pc->len--; while (pc->len) { switch (*pc->p) { case '\\': if (pc->len > 1) { if (*++pc->p == '\n') pc->linenr++; pc->len--; } break; case '[': level++; break; case ']': if (--level == 0) { pc->tend = pc->p - 1; pc->p++; pc->len--; return; } break; case '"': if (startofword) { JimParseSubQuote(pc); continue; } break; case '{': JimParseSubBrace(pc); startofword = 0; continue; case '\n': pc->linenr++; break; } startofword = isspace(*pc->p); pc->p++; pc->len--; } pc->missing.ch = '['; pc->missing.line = line; pc->tend = pc->p - 1; } static __device__ int JimParseBrace(struct JimParserCtx *pc) { pc->tstart = pc->p + 1; pc->tline = pc->linenr; pc->tt = JIM_TT_STR; JimParseSubBrace(pc); return JIM_OK; } static __device__ int JimParseCmd(struct JimParserCtx *pc) { pc->tstart = pc->p + 1; pc->tline = pc->linenr; pc->tt = JIM_TT_CMD; JimParseSubCmd(pc); return JIM_OK; } static __device__ int JimParseQuote(struct JimParserCtx *pc) { pc->tstart = pc->p + 1; pc->tline = pc->linenr; pc->tt = JimParseSubQuote(pc); return JIM_OK; } static __device__ int JimParseVar(struct JimParserCtx *pc) { // skip the $ pc->p++; pc->len--; #ifdef EXPRSUGAR_BRACKET if (*pc->p == '[') { // Parse $[...] 
expr shorthand syntax JimParseCmd(pc); pc->tt = JIM_TT_EXPRSUGAR; return JIM_OK; } #endif pc->tstart = pc->p; pc->tt = JIM_TT_VAR; pc->tline = pc->linenr; if (*pc->p == '{') { pc->tstart = ++pc->p; pc->len--; while (pc->len && *pc->p != '}') { if (*pc->p == '\n') pc->linenr++; pc->p++; pc->len--; } pc->tend = pc->p - 1; if (pc->len) { pc->p++; pc->len--; } } else { while (1) { // Skip double colon, but not single colon! if (pc->p[0] == ':' && pc->p[1] == ':') { while (*pc->p == ':') { pc->p++; pc->len--; } continue; } // Note that any char >= 0x80 must be part of a utf-8 char. We consider all unicode points outside of ASCII as letters if (isalnum(*pc->p) || *pc->p == '_' || (unsigned char)(*pc->p) >= 0x80) { pc->p++; pc->len--; continue; } break; } // Parse [dict get] syntax sugar. if (*pc->p == '(') { int count = 1; const char *paren = NULL; pc->tt = JIM_TT_DICTSUGAR; while (count && pc->len) { pc->p++; pc->len--; if (*pc->p == '\\' && pc->len >= 1) { pc->p++; pc->len--; } else if (*pc->p == '(') count++; else if (*pc->p == ')') { paren = pc->p; count--; } } if (count == 0) { pc->p++; pc->len--; } else if (paren) { // Did not find a matching paren. Back up paren++; pc->len += (int)(pc->p - paren); pc->p = paren; } #ifndef EXPRSUGAR_BRACKET if (*pc->tstart == '(') { pc->tt = JIM_TT_EXPRSUGAR; } #endif } pc->tend = pc->p - 1; } // Check if we parsed just the '$' character. That's not a variable so an error is returned to tell the state machine to consider this '$' just a string. */ if (pc->tstart == pc->p) { pc->p--; pc->len++; return JIM_ERROR; } return JIM_OK; } static __device__ int JimParseStr(struct JimParserCtx *pc) { if (pc->tt == JIM_TT_SEP || pc->tt == JIM_TT_EOL || pc->tt == JIM_TT_NONE || pc->tt == JIM_TT_STR) { // Starting a new word if (*pc->p == '{') return JimParseBrace(pc); if (*pc->p == '"') { pc->state = JIM_PS_QUOTE; pc->p++; pc->len--; // In case the end quote is missing pc->missing.line = pc->tline; } } pc->tstart = pc->p; pc->tline = pc->linenr; while (1) { if (pc->len == 0) { if (pc->state == JIM_PS_QUOTE) pc->missing.ch = '"'; pc->tend = pc->p - 1; pc->tt = JIM_TT_ESC; return JIM_OK; } switch (*pc->p) { case '\\': if (pc->state == JIM_PS_DEF && *(pc->p + 1) == '\n') { pc->tend = pc->p - 1; pc->tt = JIM_TT_ESC; return JIM_OK; } if (pc->len >= 2) { if (*(pc->p + 1) == '\n') pc->linenr++; pc->p++; pc->len--; } // End of script with trailing backslash else if (pc->len == 1) pc->missing.ch = '\\'; break; case '(': // If the following token is not '$' just keep going if (pc->len > 1 && pc->p[1] != '$') break; case ')': // Only need a separate ')' token if the previous was a var if (*pc->p == '(' || pc->tt == JIM_TT_VAR) { if (pc->p == pc->tstart) { // At the start of the token, so just return this char pc->p++; pc->len--; } pc->tend = pc->p - 1; pc->tt = JIM_TT_ESC; return JIM_OK; } break; case '$': case '[': pc->tend = pc->p - 1; pc->tt = JIM_TT_ESC; return JIM_OK; case ' ': case '\t': case '\n': case '\r': case '\f': case ';': if (pc->state == JIM_PS_DEF) { pc->tend = pc->p - 1; pc->tt = JIM_TT_ESC; return JIM_OK; } else if (*pc->p == '\n') pc->linenr++; break; case '"': if (pc->state == JIM_PS_QUOTE) { pc->tend = pc->p - 1; pc->tt = JIM_TT_ESC; pc->p++; pc->len--; pc->state = JIM_PS_DEF; return JIM_OK; } break; } pc->p++; pc->len--; } //return JIM_OK; // unreached } static __device__ int JimParseComment(struct JimParserCtx *pc) { while (*pc->p) { if (*pc->p == '\\') { pc->p++; pc->len--; if (pc->len == 0) { pc->missing.ch = '\\'; return JIM_OK; } if (*pc->p == '\n') 
pc->linenr++; } else if (*pc->p == '\n') { pc->p++; pc->len--; pc->linenr++; break; } pc->p++; pc->len--; } return JIM_OK; } // xdigitval and odigitval are helper functions for JimEscape() static __device__ int xdigitval(int c) { if (c >= '0' && c <= '9') return c - '0'; if (c >= 'a' && c <= 'f') return c - 'a' + 10; if (c >= 'A' && c <= 'F') return c - 'A' + 10; return -1; } static __device__ int odigitval(int c) { if (c >= '0' && c <= '7') return c - '0'; return -1; } // Perform Tcl escape substitution of 's', storing the result string into 'dest'. The escaped string is guaranteed to // be the same length or shorted than the source string. Slen is the length of the string at 's', if it's -1 the string length will be calculated by the function. // The function returns the length of the resulting string. static __device__ int JimEscape(char *dest, const char *s, int slen) { char *p = dest; int i, len; if (slen == -1) slen = (int)strlen(s); for (i = 0; i < slen; i++) { switch (s[i]) { case '\\': switch (s[i + 1]) { case 'a': *p++ = 0x7; i++; break; case 'b': *p++ = 0x8; i++; break; case 'f': *p++ = 0xc; i++; break; case 'n': *p++ = 0xa; i++; break; case 'r': *p++ = 0xd; i++; break; case 't': *p++ = 0x9; i++; break; case 'u': case 'U': case 'x': // A unicode or hex sequence. // \x Expect 1-2 hex chars and convert to hex. // \u Expect 1-4 hex chars and convert to utf-8. // \U Expect 1-8 hex chars and convert to utf-8. // \u{NNN} supports 1-6 hex chars and convert to utf-8. // An invalid sequence means simply the escaped char. { unsigned val = 0; int k; int maxchars = 2; i++; if (s[i] == 'U') maxchars = 8; else if (s[i] == 'u') { if (s[i + 1] == '{') { maxchars = 6; i++; } else maxchars = 4; } for (k = 0; k < maxchars; k++) { int c = xdigitval(s[i + k + 1]); if (c == -1) break; val = (val << 4) | c; } // The \u{nnn} syntax supports up to 21 bit codepoints. if (s[i] == '{') { if (k == 0 || val > 0x1fffff || s[i + k + 1] != '}') { // Back up i--; k = 0; } // Skip the closing brace else k++; } if (k) { // Got a valid sequence, so convert if (s[i] == 'x') *p++ = val; else p += utf8_fromunicode(p, val); i += k; break; } // Not a valid codepoint, just an escaped char *p++ = s[i]; } break; case 'v': *p++ = 0xb; i++; break; case '\0': *p++ = '\\'; i++; break; case '\n': // Replace all spaces and tabs after backslash newline with a single space *p++ = ' '; do { i++; } while (s[i + 1] == ' ' || s[i + 1] == '\t'); break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': // octal escape { int val = 0; int c = odigitval(s[i + 1]); val = c; c = odigitval(s[i + 2]); if (c == -1) { *p++ = val; i++; break; } val = (val * 8) + c; c = odigitval(s[i + 3]); if (c == -1) { *p++ = val; i += 2; break; } val = (val * 8) + c; *p++ = val; i += 3; } break; default: *p++ = s[i + 1]; i++; break; } break; default: *p++ = s[i]; break; } } len = (int)(p - dest); *p = '\0'; return len; } // Returns a dynamically allocated copy of the current token in the parser context. The function performs conversion of escapes if the token is of type JIM_TT_ESC. // Note that after the conversion, tokens that are grouped with braces in the source code, are always recognizable from the identical string obtained in a different way from the type. 
// For example the string: // {*}$a // will return as first token "*", of type JIM_TT_STR // While the string: // *$a // will return as first token "*", of type JIM_TT_ESC static __device__ Jim_Obj *JimParserGetTokenObj(Jim_Interp *interp, struct JimParserCtx *pc) { char *token; int len; const char *start = pc->tstart; const char *end = pc->tend; if (start > end) { len = 0; token = (char *)Jim_Alloc(1); token[0] = '\0'; } else { len = (int)(end - start) + 1; token = (char *)Jim_Alloc(len + 1); if (pc->tt != JIM_TT_ESC) { // No escape conversion needed? Just copy it. memcpy(token, start, len); token[len] = '\0'; } // Else convert the escape chars. else len = JimEscape(token, start, len); } return Jim_NewStringObjNoAlloc(interp, token, len); } // Parses the given string to determine if it represents a complete script. // This is useful for interactive shells implementation, for [info complete]. // If 'stateCharPtr' != NULL, the function stores ' ' on complete script, // '{' on scripts incomplete missing one or more '}' to be balanced. // '[' on scripts incomplete missing one or more ']' to be balanced. // '"' on scripts incomplete missing a '"' char. // '\\' on scripts with a trailing backslash. // If the script is complete, 1 is returned, otherwise 0. __device__ int Jim_ScriptIsComplete(const char *s, int len, char *stateCharPtr) { struct JimParserCtx parser; JimParserInit(&parser, s, len, 1); while (!parser.eof) JimParseScript(&parser); if (stateCharPtr) *stateCharPtr = parser.missing.ch; return (parser.missing.ch == ' '); } #pragma endregion // ----------------------------------------------------------------------------- // Tcl Lists parsing // ----------------------------------------------------------------------------- #pragma region Tcl Lists parsing static __device__ int JimParseListSep(struct JimParserCtx *pc); static __device__ int JimParseListStr(struct JimParserCtx *pc); static __device__ int JimParseListQuote(struct JimParserCtx *pc); static __device__ int JimParseList(struct JimParserCtx *pc) { if (isspace(*pc->p)) return JimParseListSep(pc); switch (*pc->p) { case '"': return JimParseListQuote(pc); case '{': return JimParseBrace(pc); default: if (pc->len) return JimParseListStr(pc); break; } pc->tstart = pc->tend = pc->p; pc->tline = pc->linenr; pc->tt = JIM_TT_EOL; pc->eof = 1; return JIM_OK; } static __device__ int JimParseListSep(struct JimParserCtx *pc) { pc->tstart = pc->p; pc->tline = pc->linenr; while (isspace(*pc->p)) { if (*pc->p == '\n') pc->linenr++; pc->p++; pc->len--; } pc->tend = pc->p - 1; pc->tt = JIM_TT_SEP; return JIM_OK; } static __device__ int JimParseListQuote(struct JimParserCtx *pc) { pc->p++; pc->len--; pc->tstart = pc->p; pc->tline = pc->linenr; pc->tt = JIM_TT_STR; while (pc->len) { switch (*pc->p) { case '\\': pc->tt = JIM_TT_ESC; if (--pc->len == 0) { // Trailing backslash pc->tend = pc->p; return JIM_OK; } pc->p++; break; case '\n': pc->linenr++; break; case '"': pc->tend = pc->p - 1; pc->p++; pc->len--; return JIM_OK; } pc->p++; pc->len--; } pc->tend = pc->p - 1; return JIM_OK; } static __device__ int JimParseListStr(struct JimParserCtx *pc) { pc->tstart = pc->p; pc->tline = pc->linenr; pc->tt = JIM_TT_STR; while (pc->len) { if (isspace(*pc->p)) { pc->tend = pc->p - 1; return JIM_OK; } if (*pc->p == '\\') { if (--pc->len == 0) { // Trailing backslash pc->tend = pc->p; return JIM_OK; } pc->tt = JIM_TT_ESC; pc->p++; } pc->p++; pc->len--; } pc->tend = pc->p - 1; return JIM_OK; } #pragma endregion // 
----------------------------------------------------------------------------- // Jim_Obj related functions // ----------------------------------------------------------------------------- #pragma region Jim_Obj related functions // Return a new initialized object. __device__ Jim_Obj *Jim_NewObj(Jim_Interp *interp) { Jim_Obj *objPtr; // Check if there are objects in the free list if (interp->freeList != NULL) { // Unlink the object from the free list objPtr = interp->freeList; interp->freeList = objPtr->nextObjPtr; } // No ready to use objects: allocate a new one else objPtr = (Jim_Obj *)Jim_Alloc(sizeof(*objPtr)); // Object is returned with refCount of 0. Every kind of GC implemented should take care to don't try to scan objects with refCount == 0. objPtr->refCount = 0; // All the other fields are left not initialized to save time. The caller will probably want to set them to the right value anyway. // Put the object into the live list objPtr->prevObjPtr = NULL; objPtr->nextObjPtr = interp->liveList; if (interp->liveList) interp->liveList->prevObjPtr = objPtr; interp->liveList = objPtr; return objPtr; } // Free an object. Actually objects are never freed, but just moved to the free objects list, where they will be reused by Jim_NewObj(). __device__ void Jim_FreeObj(Jim_Interp *interp, Jim_Obj *objPtr) { // Check if the object was already freed, panic. JimPanic(objPtr->refCount != 0, "!!!Object %p freed with bad refcount %d, type=%s", objPtr, objPtr->refCount, objPtr->typePtr ? objPtr->typePtr->name : "<none>"); // Free the internal representation Jim_FreeIntRep(interp, objPtr); // Free the string representation if (objPtr->bytes != NULL) if (objPtr->bytes != JimEmptyStringRep) Jim_Free(objPtr->bytes); // Unlink the object from the live objects list if (objPtr->prevObjPtr) objPtr->prevObjPtr->nextObjPtr = objPtr->nextObjPtr; if (objPtr->nextObjPtr) objPtr->nextObjPtr->prevObjPtr = objPtr->prevObjPtr; if (interp->liveList == objPtr) interp->liveList = objPtr->nextObjPtr; #ifdef JIM_DISABLE_OBJECT_POOL Jim_Free(objPtr); #else // Link the object into the free objects list objPtr->prevObjPtr = NULL; objPtr->nextObjPtr = interp->freeList; if (interp->freeList) interp->freeList->prevObjPtr = objPtr; interp->freeList = objPtr; objPtr->refCount = -1; #endif } // Invalidate the string representation of an object. __device__ void Jim_InvalidateStringRep(Jim_Obj *objPtr) { if (objPtr->bytes != NULL) if (objPtr->bytes != JimEmptyStringRep) Jim_Free(objPtr->bytes); objPtr->bytes = NULL; } // Duplicate an object. The returned object has refcount = 0. __device__ Jim_Obj *Jim_DuplicateObj(Jim_Interp *interp, Jim_Obj *objPtr) { Jim_Obj *dupPtr = Jim_NewObj(interp); if (objPtr->bytes == NULL) dupPtr->bytes = NULL; // Object does not have a valid string representation. else if (objPtr->length == 0) { // Zero length, so don't even bother with the type-specific dup, since all zero length objects look the same dupPtr->bytes = JimEmptyStringRep; dupPtr->length = 0; dupPtr->typePtr = NULL; return dupPtr; } else { dupPtr->bytes = (char *)Jim_Alloc(objPtr->length + 1); dupPtr->length = objPtr->length; memcpy(dupPtr->bytes, objPtr->bytes, objPtr->length + 1); // Copy the null byte too } // By default, the new object has the same type as the old object dupPtr->typePtr = objPtr->typePtr; if (objPtr->typePtr != NULL) { if (objPtr->typePtr->dupIntRepProc == NULL) dupPtr->internalRep = objPtr->internalRep; else objPtr->typePtr->dupIntRepProc(interp, objPtr, dupPtr); // The dup proc may set a different type, e.g. 
NULL } return dupPtr; } // Return the string representation for objPtr. If the object's string representation is invalid, calls the updateStringProc method to create a new one from the internal representation of the object. __device__ const char *Jim_GetString(Jim_Obj *objPtr, int *lenPtr) { if (objPtr->bytes == NULL) { // Invalid string repr. Generate it. JimPanic(objPtr->typePtr->updateStringProc == NULL, "UpdateStringProc called against '%s' type.", objPtr->typePtr->name); objPtr->typePtr->updateStringProc(objPtr); } if (lenPtr) *lenPtr = objPtr->length; return objPtr->bytes; } // Just returns the length of the object's string rep __device__ int Jim_Length(Jim_Obj *objPtr) { if (objPtr->bytes == NULL) { // Invalid string repr. Generate it. JimPanic(objPtr->typePtr->updateStringProc == NULL, "UpdateStringProc called against '%s' type.", objPtr->typePtr->name); objPtr->typePtr->updateStringProc(objPtr); } return objPtr->length; } // Just returns object's string rep __device__ const char *Jim_String(Jim_Obj *objPtr) { if (!objPtr) return nullptr; if (objPtr->bytes == NULL) { // Invalid string repr. Generate it. JimPanic(objPtr->typePtr == NULL, "UpdateStringProc called against typeless value."); JimPanic(objPtr->typePtr->updateStringProc == NULL, "UpdateStringProc called against '%s' type.", objPtr->typePtr->name); objPtr->typePtr->updateStringProc(objPtr); } return objPtr->bytes; } static __device__ void JimSetStringBytes(Jim_Obj *objPtr, const char *str) { objPtr->bytes = Jim_StrDup(str); objPtr->length = (int)strlen(str); } static __device__ void FreeDictSubstInternalRep(Jim_Interp *interp, Jim_Obj *objPtr); static __device__ void DupDictSubstInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr); __constant__ static const Jim_ObjType _dictSubstObjType = { "dict-substitution", FreeDictSubstInternalRep, DupDictSubstInternalRep, NULL, JIM_TYPE_NONE, }; static __device__ void FreeInterpolatedInternalRep(Jim_Interp *interp, Jim_Obj *objPtr) { Jim_DecrRefCount(interp, objPtr->internalRep.dictSubstValue.indexObjPtr); } __constant__ static const Jim_ObjType _interpolatedObjType = { "interpolated", FreeInterpolatedInternalRep, NULL, NULL, JIM_TYPE_NONE, }; #pragma endregion // ----------------------------------------------------------------------------- // String Object // ----------------------------------------------------------------------------- #pragma region String Object static __device__ void DupStringInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr); static __device__ int SetStringFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr); __constant__ static const Jim_ObjType _stringObjType = { "string", NULL, DupStringInternalRep, NULL, JIM_TYPE_REFERENCES, }; static __device__ void DupStringInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr) { JIM_NOTUSED(interp); // This is a bit subtle: the only caller of this function should be Jim_DuplicateObj(), that will copy the string representaion. After the copy, the duplicated // object will not have more room in the buffer than srcPtr->length bytes. So we just set it to length. dupPtr->internalRep.strValue.maxLength = srcPtr->length; dupPtr->internalRep.strValue.charLength = srcPtr->internalRep.strValue.charLength; } static __device__ int SetStringFromAny(Jim_Interp *interp, Jim_Obj *objPtr) { if (objPtr->typePtr != &_stringObjType) { // Get a fresh string representation. if (objPtr->bytes == NULL) { // Invalid string repr. Generate it. 
            JimPanic(objPtr->typePtr->updateStringProc == NULL, "UpdateStringProc called against '%s' type.", objPtr->typePtr->name);
            objPtr->typePtr->updateStringProc(objPtr);
        }
        // Free any other internal representation.
        Jim_FreeIntRep(interp, objPtr);
        // Set it as string, i.e. just set the maxLength field.
        objPtr->typePtr = &_stringObjType;
        objPtr->internalRep.strValue.maxLength = objPtr->length;
        // Don't know the utf-8 length yet
        objPtr->internalRep.strValue.charLength = -1;
    }
    return JIM_OK;
}

// Returns the length of the object string in chars, not bytes.
// These may be different for a utf-8 string.
__device__ int Jim_Utf8Length(Jim_Interp *interp, Jim_Obj *objPtr) {
#ifdef JIM_UTF8
    SetStringFromAny(interp, objPtr);
    if (objPtr->internalRep.strValue.charLength < 0)
        objPtr->internalRep.strValue.charLength = utf8_strlen(objPtr->bytes, objPtr->length);
    return objPtr->internalRep.strValue.charLength;
#else
    return Jim_Length(objPtr);
#endif
}

// len is in bytes -- see also Jim_NewStringObjUtf8()
__device__ Jim_Obj *Jim_NewStringObj(Jim_Interp *interp, const char *s, int len) {
    Jim_Obj *objPtr = Jim_NewObj(interp);
    // Need to find out how many bytes the string requires
    if (len == -1)
        len = (int)strlen(s);
    // Alloc/Set the string rep.
    if (len == 0)
        objPtr->bytes = JimEmptyStringRep;
    else {
        objPtr->bytes = (char *)Jim_Alloc(len + 1);
        memcpy(objPtr->bytes, s, len);
        objPtr->bytes[len] = '\0';
    }
    objPtr->length = len;
    // No typePtr field for the vanilla string object.
    objPtr->typePtr = NULL;
    return objPtr;
}

// charlen is in characters -- see also Jim_NewStringObj()
__device__ Jim_Obj *Jim_NewStringObjUtf8(Jim_Interp *interp, const char *s, int charlen) {
#ifdef JIM_UTF8
    // Need to find out how many bytes the string requires
    int bytelen = utf8_index(s, charlen);
    Jim_Obj *objPtr = Jim_NewStringObj(interp, s, bytelen);
    // Remember the utf8 length, so set the type
    objPtr->typePtr = &_stringObjType;
    objPtr->internalRep.strValue.maxLength = bytelen;
    objPtr->internalRep.strValue.charLength = charlen;
    return objPtr;
#else
    return Jim_NewStringObj(interp, s, charlen);
#endif
}

// This version does not try to duplicate the 's' pointer, but uses it directly.
__device__ Jim_Obj *Jim_NewStringObjNoAlloc(Jim_Interp *interp, char *s, int len) {
    Jim_Obj *objPtr = Jim_NewObj(interp);
    objPtr->bytes = s;
    objPtr->length = (len == -1 ? (int)strlen(s) : len);
    objPtr->typePtr = NULL;
    return objPtr;
}

// Low-level string append. Use it only against unshared objects of type "string".
static __device__ void StringAppendString(Jim_Obj *objPtr, const char *str, int len) {
    if (len == -1)
        len = (int)strlen(str);
    int needlen = objPtr->length + len;
    if (objPtr->internalRep.strValue.maxLength < needlen || objPtr->internalRep.strValue.maxLength == 0) {
        needlen *= 2;
        // Inefficient to malloc() for less than 8 bytes
        if (needlen < 7)
            needlen = 7;
        if (objPtr->bytes == JimEmptyStringRep)
            objPtr->bytes = (char *)Jim_Alloc(needlen + 1);
        else
            objPtr->bytes = (char *)Jim_Realloc(objPtr->bytes, needlen + 1);
        objPtr->internalRep.strValue.maxLength = needlen;
    }
    memcpy(objPtr->bytes + objPtr->length, str, len);
    objPtr->bytes[objPtr->length + len] = '\0';
    if (objPtr->internalRep.strValue.charLength >= 0) // Update the utf-8 char length
        objPtr->internalRep.strValue.charLength += utf8_strlen(objPtr->bytes + objPtr->length, len);
    objPtr->length += len;
}

// Higher level API to append strings to objects. Objects must not be shared for each of these.
// (An illustrative aside follows; the append functions come after it.)
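
// ---- Illustrative aside (not part of Jim): append growth ----
// A minimal sketch, assuming an interp is at hand, of why repeated appends are
// cheap: StringAppendString() doubles maxLength on overflow, so many small
// appends cause only a handful of reallocations. JimDemoAppendGrowth is a
// hypothetical name used only for this sketch.
static __device__ Jim_Obj *JimDemoAppendGrowth(Jim_Interp *interp) {
    Jim_Obj *objPtr = Jim_NewStringObj(interp, "", 0);  // fresh, unshared object
    SetStringFromAny(interp, objPtr);                   // give it the "string" type
    for (int i = 0; i < 100; i++)
        StringAppendString(objPtr, "abc", 3);           // maxLength grows 7 -> 18 -> 42 -> ...
    return objPtr;                                      // 300 bytes, ~6 allocations total
}
// ---- end of aside ----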
__device__ void Jim_AppendString(Jim_Interp *interp, Jim_Obj *objPtr, const char *str, int len) { JimPanic(Jim_IsShared(objPtr), "Jim_AppendString called with shared object"); SetStringFromAny(interp, objPtr); StringAppendString(objPtr, str, len); } __device__ void Jim_AppendObj(Jim_Interp *interp, Jim_Obj *objPtr, Jim_Obj *appendObjPtr) { int len; const char *str = Jim_GetString(appendObjPtr, &len); Jim_AppendString(interp, objPtr, str, len); } __device__ void Jim_AppendStrings(Jim_Interp *interp, Jim_Obj *objPtr, ...) { va_list va; va_start(va, objPtr); JimPanic(Jim_IsShared(objPtr), "Jim_AppendString_ called with shared object"); SetStringFromAny(interp, objPtr); while (1) { const char *s = va_arg(va, const char *); if (s == NULL) break; Jim_AppendString(interp, objPtr, s, -1); } va_end(va); } __device__ int Jim_StringEqObj(Jim_Obj *aObjPtr, Jim_Obj *bObjPtr) { if (aObjPtr == bObjPtr) return 1; else { int Alen, Blen; const char *sA = Jim_GetString(aObjPtr, &Alen); const char *sB = Jim_GetString(bObjPtr, &Blen); return (Alen == Blen && !memcmp(sA, sB, Alen)); } } // Note. Does not support embedded nulls in either the pattern or the object. __device__ int Jim_StringMatchObj(Jim_Interp *interp, Jim_Obj *patternObjPtr, Jim_Obj *objPtr, int nocase) { return JimGlobMatch(Jim_String(patternObjPtr), Jim_String(objPtr), nocase); } // Note: does not support embedded nulls for the nocase option. __device__ int Jim_StringCompareObj(Jim_Interp *interp, Jim_Obj *firstObjPtr, Jim_Obj *secondObjPtr, int nocase) { int l1, l2; const char *s1 = Jim_GetString(firstObjPtr, &l1); const char *s2 = Jim_GetString(secondObjPtr, &l2); if (nocase) return JimStringCompareLen(s1, s2, -1, nocase); // Do a character compare for nocase return JimStringCompare(s1, l1, s2, l2); } // Like Jim_StringCompareObj() except compares to a maximum of the length of firstObjPtr. // Note: does not support embedded nulls __device__ int Jim_StringCompareLenObj(Jim_Interp *interp, Jim_Obj *firstObjPtr, Jim_Obj *secondObjPtr, int nocase) { const char *s1 = Jim_String(firstObjPtr); const char *s2 = Jim_String(secondObjPtr); return JimStringCompareLen(s1, s2, Jim_Utf8Length(interp, firstObjPtr), nocase); } // Convert a range, as returned by Jim_GetRange(), into an absolute index into an object of the specified length. // This function may return negative values, or values greater than or equal to the length of the list if the index // is out of range. static __device__ int JimRelToAbsIndex(int len, int idx) { return (idx < 0 ? len + idx : idx); } // Convert a pair of indexes (*firstPtr, *lastPtr) as normalized by JimRelToAbsIndex(), into a form suitable for implementation of commands like [string range] and [lrange]. // The resulting range is guaranteed to address valid elements of the structure. 
// static __device__ void JimRelToAbsRange(int len, int *firstPtr, int *lastPtr, int *rangeLenPtr) { int rangeLen; if (*firstPtr > *lastPtr) rangeLen = 0; else { rangeLen = *lastPtr - *firstPtr + 1; if (rangeLen) { if (*firstPtr < 0) { rangeLen += *firstPtr; *firstPtr = 0; } if (*lastPtr >= len) { rangeLen -= (*lastPtr - (len - 1)); *lastPtr = len - 1; } } } if (rangeLen < 0) rangeLen = 0; *rangeLenPtr = rangeLen; } static __device__ int JimStringGetRange(Jim_Interp *interp, Jim_Obj *firstObjPtr, Jim_Obj *lastObjPtr, int len, int *first, int *last, int *range) { if (Jim_GetIndex(interp, firstObjPtr, first) != JIM_OK) return JIM_ERROR; if (Jim_GetIndex(interp, lastObjPtr, last) != JIM_OK) return JIM_ERROR; *first = JimRelToAbsIndex(len, *first); *last = JimRelToAbsIndex(len, *last); JimRelToAbsRange(len, first, last, range); return JIM_OK; } __device__ Jim_Obj *Jim_StringByteRangeObj(Jim_Interp *interp, Jim_Obj *strObjPtr, Jim_Obj *firstObjPtr, Jim_Obj *lastObjPtr) { int first, last; int rangeLen; int bytelen; const char *str = Jim_GetString(strObjPtr, &bytelen); if (JimStringGetRange(interp, firstObjPtr, lastObjPtr, bytelen, &first, &last, &rangeLen) != JIM_OK) return NULL; if (first == 0 && rangeLen == bytelen) return strObjPtr; return Jim_NewStringObj(interp, str + first, rangeLen); } __device__ Jim_Obj *Jim_StringRangeObj(Jim_Interp *interp, Jim_Obj *strObjPtr, Jim_Obj *firstObjPtr, Jim_Obj *lastObjPtr) { #ifdef JIM_UTF8 int first, last; const char *str; int len, rangeLen; int bytelen; str = Jim_GetString(strObjPtr, &bytelen); len = Jim_Utf8Length(interp, strObjPtr); if (JimStringGetRange(interp, firstObjPtr, lastObjPtr, len, &first, &last, &rangeLen) != JIM_OK) return NULL; if (first == 0 && rangeLen == len) return strObjPtr; if (len == bytelen) return Jim_NewStringObj(interp, str + first, rangeLen); // ASCII optimisation return Jim_NewStringObjUtf8(interp, str + utf8_index(str, first), rangeLen); #else return Jim_StringByteRangeObj(interp, strObjPtr, firstObjPtr, lastObjPtr); #endif } __device__ Jim_Obj *JimStringReplaceObj(Jim_Interp *interp, Jim_Obj *strObjPtr, Jim_Obj *firstObjPtr, Jim_Obj *lastObjPtr, Jim_Obj *newStrObj) { int len = Jim_Utf8Length(interp, strObjPtr); int first, last, rangeLen; if (JimStringGetRange(interp, firstObjPtr, lastObjPtr, len, &first, &last, &rangeLen) != JIM_OK) return NULL; if (last < first) return strObjPtr; const char *str = Jim_String(strObjPtr); // Before part Jim_Obj *objPtr = Jim_NewStringObjUtf8(interp, str, first); // Replacement if (newStrObj) Jim_AppendObj(interp, objPtr, newStrObj); // After part Jim_AppendString(interp, objPtr, str + utf8_index(str, last + 1), len - last - 1); return objPtr; } // Note: does not support embedded nulls. static __device__ void JimStrCopyUpperLower(char *dest, const char *str, int uc) { while (*str) { int c; str += utf8_tounicode(str, &c); dest += utf8_getchars(dest, uc ? utf8_upper(c) : utf8_lower(c)); } *dest = 0; } // Note: does not support embedded nulls. static __device__ Jim_Obj *JimStringToLower(Jim_Interp *interp, Jim_Obj *strObjPtr) { SetStringFromAny(interp, strObjPtr); int len; const char *str = Jim_GetString(strObjPtr, &len); #ifdef JIM_UTF8 // Case mapping can change the utf-8 length of the string. But at worst it will be by one extra byte per char len *= 2; #endif char *buf = (char *)Jim_Alloc(len + 1); JimStrCopyUpperLower(buf, str, 0); return Jim_NewStringObjNoAlloc(interp, buf, -1); } // Note: does not support embedded nulls. 
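
// ---- Illustrative aside (not part of Jim): range normalization ----
// A minimal check, under the definitions above, of how JimRelToAbsIndex() and
// JimRelToAbsRange() turn Tcl-style indices into a clamped, valid range.
// JimDemoRangeLen is a hypothetical name used only for this sketch.
static __device__ int JimDemoRangeLen(void) {
    int first = JimRelToAbsIndex(5, -3);            // -3 => 5 + (-3) = 2
    int last = JimRelToAbsIndex(5, 10);             // non-negative, stays 10 for now
    int rangeLen;
    JimRelToAbsRange(5, &first, &last, &rangeLen);  // clamps last to 4
    return rangeLen;                                // 3: elements 2, 3 and 4
}
// ---- end of aside; JimStringToUpper follows ----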
static __device__ Jim_Obj *JimStringToUpper(Jim_Interp *interp, Jim_Obj *strObjPtr) { if (strObjPtr->typePtr != &_stringObjType) SetStringFromAny(interp, strObjPtr); int len; const char *str = Jim_GetString(strObjPtr, &len); #ifdef JIM_UTF8 // Case mapping can change the utf-8 length of the string. But at worst it will be by one extra byte per char len *= 2; #endif char *buf = (char *)Jim_Alloc(len + 1); JimStrCopyUpperLower(buf, str, 1); return Jim_NewStringObjNoAlloc(interp, buf, -1); } // Note: does not support embedded nulls. static __device__ Jim_Obj *JimStringToTitle(Jim_Interp *interp, Jim_Obj *strObjPtr) { int len; const char *str = Jim_GetString(strObjPtr, &len); if (len == 0) return strObjPtr; #ifdef JIM_UTF8 // Case mapping can change the utf-8 length of the string. But at worst it will be by one extra byte per char len *= 2; #endif char *buf, *p; buf = p = (char *)Jim_Alloc(len + 1); int c; str += utf8_tounicode(str, &c); p += utf8_getchars(p, utf8_title(c)); JimStrCopyUpperLower(p, str, 0); return Jim_NewStringObjNoAlloc(interp, buf, -1); } // Similar to memchr() except searches a UTF-8 string 'str' of byte length 'len' for unicode character 'c'. Returns the position if found or NULL if not static __device__ const char *utf8_memchr(const char *str, int len, int c) { #ifdef JIM_UTF8 while (len) { int sc; int n = utf8_tounicode(str, &sc); if (sc == c) return str; str += n; len -= n; } return NULL; #else return (const char *)memchr(str, c, len); #endif } // Searches for the first non-trim char in string (str, len) // If none is found, returns just past the last char. // Lengths are in bytes. static __device__ const char *JimFindTrimLeft(const char *str, int len, const char *trimchars, int trimlen) { while (len) { int c; int n = utf8_tounicode(str, &c); if (utf8_memchr(trimchars, trimlen, c) == NULL) // Not a trim char, so stop break; str += n; len -= n; } return str; } // Searches backwards for a non-trim char in string (str, len). // Returns a pointer to just after the non-trim char, or NULL if not found. // Lengths are in bytes. 
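
// ---- Illustrative aside (not part of Jim): trimming ----
// A minimal sketch of JimFindTrimLeft() above: skip a leading run of trim
// characters and return a pointer to the first kept character. JimDemoSkipSpace
// is a hypothetical name used only for this sketch; the character set mirrors
// _default_trim_chars defined further below.
static __device__ const char *JimDemoSkipSpace(const char *str, int len) {
    const char trimchars[] = " \t\n\r";
    // e.g. "  hi" => returns a pointer to 'h'
    return JimFindTrimLeft(str, len, trimchars, sizeof(trimchars) - 1);
}
// ---- end of aside; JimFindTrimRight follows ----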
static __device__ const char *JimFindTrimRight(const char *str, int len, const char *trimchars, int trimlen) { str += len; while (len) { int c; int n = utf8_prev_len(str, len); len -= n; str -= n; n = utf8_tounicode(str, &c); if (utf8_memchr(trimchars, trimlen, c) == NULL) return str + n; } return NULL; } __constant__ static const char _default_trim_chars[] = " \t\n\r"; // sizeof() here includes the null byte __constant__ static int default_trim_chars_len = sizeof(_default_trim_chars); static __device__ Jim_Obj *JimStringTrimLeft(Jim_Interp *interp, Jim_Obj *strObjPtr, Jim_Obj *trimcharsObjPtr) { int len; const char *str = Jim_GetString(strObjPtr, &len); const char *trimchars = _default_trim_chars; int trimcharslen = default_trim_chars_len; const char *newstr; if (trimcharsObjPtr) trimchars = Jim_GetString(trimcharsObjPtr, &trimcharslen); newstr = JimFindTrimLeft(str, len, trimchars, trimcharslen); if (newstr == str) return strObjPtr; return Jim_NewStringObj(interp, newstr, len - (int)(newstr - str)); } static __device__ Jim_Obj *JimStringTrimRight(Jim_Interp *interp, Jim_Obj *strObjPtr, Jim_Obj *trimcharsObjPtr) { int len; const char *trimchars = _default_trim_chars; int trimcharslen = default_trim_chars_len; const char *nontrim; if (trimcharsObjPtr) trimchars = Jim_GetString(trimcharsObjPtr, &trimcharslen); SetStringFromAny(interp, strObjPtr); len = Jim_Length(strObjPtr); nontrim = JimFindTrimRight(strObjPtr->bytes, len, trimchars, trimcharslen); if (nontrim == NULL) return Jim_NewEmptyStringObj(interp); // All trim, so return a zero-length string if (nontrim == strObjPtr->bytes + len) return strObjPtr; // All non-trim, so return the original object if (Jim_IsShared(strObjPtr)) strObjPtr = Jim_NewStringObj(interp, strObjPtr->bytes, (int)(nontrim - strObjPtr->bytes)); else { // Can modify this string in place strObjPtr->bytes[nontrim - strObjPtr->bytes] = 0; strObjPtr->length = (int)(nontrim - strObjPtr->bytes); } return strObjPtr; } static __device__ Jim_Obj *JimStringTrim(Jim_Interp *interp, Jim_Obj *strObjPtr, Jim_Obj *trimcharsObjPtr) { // First trim left. 
    Jim_Obj *objPtr = JimStringTrimLeft(interp, strObjPtr, trimcharsObjPtr);
    // Now trim right
    strObjPtr = JimStringTrimRight(interp, objPtr, trimcharsObjPtr);
    // Note: refCount check is needed since objPtr may be emptyObj
    if (objPtr != strObjPtr && objPtr->refCount == 0)
        Jim_FreeNewObj(interp, objPtr);     // We don't want this object to be leaked
    return strObjPtr;
}

// Some platforms don't have isascii - need a non-macro version
#ifdef HAVE_ISASCII
#define jim_isascii isascii
#else
static __device__ int jim_isascii(int c) {
    return !(c & ~0x7f);
}
#endif

__constant__ static const char *const _strclassnames[] = {
    "integer", "alpha", "alnum", "ascii", "digit", "double", "lower", "upper", "space", "xdigit", "control", "print", "graph", "punct", NULL
};
enum {
    STR_IS_INTEGER, STR_IS_ALPHA, STR_IS_ALNUM, STR_IS_ASCII, STR_IS_DIGIT, STR_IS_DOUBLE, STR_IS_LOWER, STR_IS_UPPER, STR_IS_SPACE, STR_IS_XDIGIT, STR_IS_CONTROL, STR_IS_PRINT, STR_IS_GRAPH, STR_IS_PUNCT
};

static __device__ int JimStringIs(Jim_Interp *interp, Jim_Obj *strObjPtr, Jim_Obj *strClass, int strict) {
    int strclass;
    int i;
    int (*isclassfunc)(int c) = NULL;
    if (Jim_GetEnum(interp, strClass, _strclassnames, &strclass, "class", JIM_ERRMSG | JIM_ENUM_ABBREV) != JIM_OK)
        return JIM_ERROR;
    int len;
    const char *str = Jim_GetString(strObjPtr, &len);
    if (len == 0) {
        Jim_SetResultBool(interp, !strict);
        return JIM_OK;
    }
    switch (strclass) {
    case STR_IS_INTEGER: {
        jim_wide w;
        Jim_SetResultBool(interp, JimGetWideNoErr(interp, strObjPtr, &w) == JIM_OK);
        return JIM_OK;
    }
    case STR_IS_DOUBLE: {
        double d;
        Jim_SetResultBool(interp, Jim_GetDouble(interp, strObjPtr, &d) == JIM_OK && errno != ERANGE);
        return JIM_OK;
    }
    case STR_IS_ALPHA: isclassfunc = isalpha; break;
    case STR_IS_ALNUM: isclassfunc = isalnum; break;
    case STR_IS_ASCII: isclassfunc = jim_isascii; break;
    case STR_IS_DIGIT: isclassfunc = isdigit; break;
    case STR_IS_LOWER: isclassfunc = islower; break;
    case STR_IS_UPPER: isclassfunc = isupper; break;
case STR_IS_SPACE: isclassfunc = isspace; break; case STR_IS_XDIGIT: isclassfunc = isxdigit; break; case STR_IS_CONTROL: isclassfunc = iscntrl; break; case STR_IS_PRINT: isclassfunc = isprint; break; case STR_IS_GRAPH: isclassfunc = isgraph; break; case STR_IS_PUNCT: isclassfunc = ispunct; break; default: return JIM_ERROR; } for (i = 0; i < len; i++) { if (!isclassfunc(str[i])) { Jim_SetResultBool(interp, 0); return JIM_OK; } } Jim_SetResultBool(interp, 1); return JIM_OK; } #pragma endregion // ----------------------------------------------------------------------------- // Compared String Object // ----------------------------------------------------------------------------- #pragma region Compared String Object // This is strange object that allows comparison of a C literal string with a Jim object in a very short time if the same comparison is done // multiple times. For example every time the [if] command is executed, Jim has to check if a given argument is "else". // If the code has no errors, this comparison is true most of the time, so we can cache the pointer of the string of the last matching // comparison inside the object. Because most C compilers perform literal sharing, so that: char *x = "foo", char *y = "foo", will lead to x == y, // this works pretty well even if comparisons are at different places inside the C code. __constant__ static const Jim_ObjType _comparedStringObjType = { "compared-string", NULL, NULL, NULL, JIM_TYPE_REFERENCES, }; // The only way this object is exposed to the API is via the following function. Returns true if the string and the object string repr. // are the same, otherwise zero is returned. // Note: this isn't binary safe, but it hardly needs to be.*/ __device__ int Jim_CompareStringImmediate(Jim_Interp *interp, Jim_Obj *objPtr, const char *str) { if (objPtr->typePtr == &_comparedStringObjType && objPtr->internalRep.ptr == str) return 1; else { const char *objStr = Jim_String(objPtr); if (strcmp(str, objStr) != 0) return 0; if (objPtr->typePtr != &_comparedStringObjType) { Jim_FreeIntRep(interp, objPtr); objPtr->typePtr = &_comparedStringObjType; } objPtr->internalRep.ptr = (char *)str; // ATTENTION: const cast return 1; } } static __device__ int qsortCompareStringPointers(const void *a, const void *b) { char *const *sa = (char *const *)a; char *const *sb = (char *const *)b; return strcmp(*sa, *sb); } #pragma endregion // ----------------------------------------------------------------------------- // Source Object // // This object is just a string from the language point of view, but the internal representation contains the filename and line number // where this token was read. This information is used by Jim_EvalObj() if the object passed happens to be of type "source". // // This allows propagation of the information about line numbers and file names and gives error messages with absolute line numbers. // // Note that this object uses the internal representation of the Jim_Object, so there is almost no memory overhead. (One Jim_Obj for each filename). // // Also the object will be converted to something else if the given token it represents in the source file is not something to be // evaluated (not a script), and will be specialized in some other way, so the time overhead is also almost zero. 
// -----------------------------------------------------------------------------
#pragma region Source Object
static __device__ void FreeSourceInternalRep(Jim_Interp *interp, Jim_Obj *objPtr);
static __device__ void DupSourceInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr);
__constant__ static const Jim_ObjType _sourceObjType = {
    "source",
    FreeSourceInternalRep,
    DupSourceInternalRep,
    NULL,
    JIM_TYPE_REFERENCES,
};

__device__ void FreeSourceInternalRep(Jim_Interp *interp, Jim_Obj *objPtr) {
    Jim_DecrRefCount(interp, objPtr->internalRep.sourceValue.fileNameObj);
}

__device__ void DupSourceInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr) {
    dupPtr->internalRep.sourceValue = srcPtr->internalRep.sourceValue;
    Jim_IncrRefCount(dupPtr->internalRep.sourceValue.fileNameObj);
}

static __device__ void JimSetSourceInfo(Jim_Interp *interp, Jim_Obj *objPtr, Jim_Obj *fileNameObj, int lineNumber) {
    JimPanic(Jim_IsShared(objPtr), "JimSetSourceInfo called with shared object");
    JimPanic(objPtr->typePtr != NULL, "JimSetSourceInfo called with typed object");
    Jim_IncrRefCount(fileNameObj);
    objPtr->internalRep.sourceValue.fileNameObj = fileNameObj;
    objPtr->internalRep.sourceValue.lineNumber = lineNumber;
    objPtr->typePtr = &_sourceObjType;
}
#pragma endregion

// -----------------------------------------------------------------------------
// ScriptLine Object
// This object is used only in the Script internal representation. For each line of the script, it holds the number of tokens on the line and the source line number.
#pragma region ScriptLine Object
__constant__ static const Jim_ObjType _scriptLineObjType = {
    "scriptline",
    NULL,
    NULL,
    NULL,
    JIM_NONE,
};

static __device__ Jim_Obj *JimNewScriptLineObj(Jim_Interp *interp, int argc, int line) {
    Jim_Obj *objPtr;
#ifdef DEBUG_SHOW_SCRIPT
    char buf[100];
    snprintf(buf, sizeof(buf), "line=%d, argc=%d", line, argc);
    objPtr = Jim_NewStringObj(interp, buf, -1);
#else
    objPtr = Jim_NewEmptyStringObj(interp);
#endif
    objPtr->typePtr = &_scriptLineObjType;
    objPtr->internalRep.scriptLineValue.argc = argc;
    objPtr->internalRep.scriptLineValue.line = line;
    return objPtr;
}
#pragma endregion

// Script Object
//
// This object holds the parsed internal representation of a script. This representation is held within an allocated ScriptObj (see below)
#pragma region Script Object
static __device__ void FreeScriptInternalRep(Jim_Interp *interp, Jim_Obj *objPtr);
static __device__ void DupScriptInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr);
static __device__ void JimSetScriptFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr);
static __device__ int JimParseCheckMissing(Jim_Interp *interp, int ch);
__constant__ static const Jim_ObjType _scriptObjType = {
    "script",
    FreeScriptInternalRep,
    DupScriptInternalRep,
    NULL,
    JIM_TYPE_REFERENCES,
};

// Each token of a script is represented by a ScriptToken. The ScriptToken contains a type and a Jim_Obj. The Jim_Obj can be specialized by commands operating on it.
typedef struct ScriptToken {
    Jim_Obj *objPtr;
    int type;
} ScriptToken;

// This is the script object internal representation. An array of ScriptToken structures, including a pre-computed representation of the command length and arguments.
//
// For example the script:
//
// puts hello
// set $i $x$y [foo]BAR
//
// will produce a ScriptObj with the following ScriptToken's:
//
// LIN 2
// ESC puts
// ESC hello
// LIN 4
// ESC set
// VAR i
// WRD 2
// VAR x
// VAR y
// WRD 2
// CMD foo
// ESC BAR
//
// "puts hello" has two args (LIN 2), composed of single tokens. (Note that the WRD token is omitted for the common case of a single token.)
//
// "set $i $x$y [foo]BAR" has four (LIN 4) args, the first word has 1 token (ESC SET), and the last has two tokens (WRD 2 CMD foo ESC BAR)
//
// The precomputation of the command structure makes Jim_Eval() faster, and simpler because there aren't dynamic lengths / allocations.
//
// -- {expand}/{*} handling --
//
// Expand is handled in a special way.
//
// If a "word" begins with {*}, the word token count is -ve.
//
// For example the command:
//
// list {*}{a b}
//
// Will produce the following cmdstruct array:
//
// LIN 2
// ESC list
// WRD -1
// STR a b
//
// Note that the 'LIN' token also contains the source information for the first word of the line for error reporting purposes
//
// -- the substFlags field of the structure --
//
// The scriptObj structure is used to represent both "script" objects and "subst" objects. In the second case, there are no LIN and WRD
// tokens. Instead SEP and EOL tokens are added as-is. In addition, the field 'substFlags' is used to represent the flags used to turn
// the string into the internal representation. If these flags do not match what the application requires,
// the scriptObj is created again. For example the script:
//
// subst -nocommands $string
// subst -novariables $string
//
// Will (re)create the internal representation of the $string object two times.
typedef struct ScriptObj {
    ScriptToken *token;         // Tokens array
    Jim_Obj *fileNameObj;       // Filename
    int len;                    // Length of token[]
    int substFlags;             // flags used for the compilation of "subst" objects
    int inUse;                  // Used to share a ScriptObj. Currently only used by Jim_EvalObj() as protection against shimmering of the currently evaluated object.
    int firstline;              // Line number of the first line
    int linenr;                 // Error line number, if any
    int missing;                // Missing char if script failed to parse, (or space or backslash if OK)
} ScriptObj;

__device__ void FreeScriptInternalRep(Jim_Interp *interp, Jim_Obj *objPtr) {
    struct ScriptObj *script = (struct ScriptObj *)objPtr->internalRep.ptr;
    if (--script->inUse != 0)
        return;
    for (int i = 0; i < script->len; i++)
        Jim_DecrRefCount(interp, script->token[i].objPtr);
    Jim_Free(script->token);
    Jim_DecrRefCount(interp, script->fileNameObj);
    Jim_Free(script);
}

__device__ void DupScriptInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr) {
    JIM_NOTUSED(interp);
    JIM_NOTUSED(srcPtr);
    // Just return a simple string. We don't try to preserve the source info since in practice scripts are never duplicated
    dupPtr->typePtr = NULL;
}

// A simple parse token. As the script is parsed, the created tokens point into the script string rep.
typedef struct {
    const char *token;          // Pointer to the start of the token
    int len;                    // Length of this token
    int type;                   // Token type
    int line;                   // Line number
} ParseToken;

// A list of parsed tokens representing a script. Tokens are added to this list as the script is parsed. It grows as needed.
typedef struct { // Start with a statically allocated list of tokens which will be expanded with realloc if needed ParseToken *list; // Array of tokens int size; // Current size of the list int count; // Number of entries used ParseToken static_list[20]; // Small initial token space to avoid allocation } ParseTokenList; static __device__ void ScriptTokenListInit(ParseTokenList *tokenlist) { tokenlist->list = tokenlist->static_list; tokenlist->size = sizeof(tokenlist->static_list) / sizeof(ParseToken); tokenlist->count = 0; } static __device__ void ScriptTokenListFree(ParseTokenList *tokenlist) { if (tokenlist->list != tokenlist->static_list) Jim_Free(tokenlist->list); } // Adds the new token to the tokenlist. The token has the given length, type and line number. The token list is resized as necessary. static __device__ void ScriptAddToken(ParseTokenList *tokenlist, const char *token, int len, int type, int line) { if (tokenlist->count == tokenlist->size) { // Resize the list tokenlist->size *= 2; if (tokenlist->list != tokenlist->static_list) tokenlist->list = (ParseToken *)Jim_Realloc(tokenlist->list, tokenlist->size * sizeof(*tokenlist->list)); else { // The list needs to become allocated tokenlist->list = (ParseToken *)Jim_Alloc(tokenlist->size * sizeof(*tokenlist->list)); memcpy(tokenlist->list, tokenlist->static_list, tokenlist->count * sizeof(*tokenlist->list)); } } ParseToken *t = &tokenlist->list[tokenlist->count++]; t->token = token; t->len = len; t->type = type; t->line = line; } // Counts the number of adjoining non-separator tokens. // // Returns -ve if the first token is the expansion operator (in which case the count doesn't include that token). static __device__ int JimCountWordTokens(ParseToken *t) { int expand = 1; int count = 0; // Is the first word {*} or {expand}? if (t->type == JIM_TT_STR && !TOKEN_IS_SEP(t[1].type)) { if ((t->len == 1 && *t->token == '*') || (t->len == 6 && strncmp(t->token, "expand", 6) == 0)) { expand = -1; t++; } // Create an expand token } // Now count non-separator words while (!TOKEN_IS_SEP(t->type)) { t++; count++; } return count * expand; } // Create a script/subst object from the given token. static __device__ Jim_Obj *JimMakeScriptObj(Jim_Interp *interp, const ParseToken *t) { if (t->type == JIM_TT_ESC && memchr(t->token, '\\', t->len) != NULL) { // Convert backlash escapes. The result will never be longer than the original int len = t->len; char *str = (char *)Jim_Alloc(len + 1); len = JimEscape(str, t->token, len); return Jim_NewStringObjNoAlloc(interp, str, len); } // XXX: For strict Tcl compatibility, JIM_TT_STR should replace <backslash><newline><whitespace> with a single space. return Jim_NewStringObj(interp, t->token, t->len); } // Takes a tokenlist and creates the allocated list of script tokens in script->token, of length script->len. // Unnecessary tokens are discarded, and LINE and WORD tokens are inserted as required. 
// Also sets script->firstline to the line number of the first token
static __device__ void ScriptObjAddTokens(Jim_Interp *interp, struct ScriptObj *script, ParseTokenList *tokenlist) {
    int i;
    int lineargs = 0;           // Number of tokens so far for the current command
#ifdef DEBUG_SHOW_SCRIPT_TOKENS
    printf("==== Tokens ====\n");
    for (i = 0; i < tokenlist->count; i++)
#if __CUDACC__
        printf("[%2d]@%d %s '%s'\n", i, tokenlist->list[i].line, jim_tt_name(tokenlist->list[i].type), tokenlist->list[i].token);
#else
        printf("[%2d]@%d %s '%.*s'\n", i, tokenlist->list[i].line, jim_tt_name(tokenlist->list[i].type), tokenlist->list[i].len, tokenlist->list[i].token);
#endif
#endif
    // May need up to one extra script token for each EOL in the worst case
    int count = tokenlist->count;
    for (i = 0; i < tokenlist->count; i++)
        if (tokenlist->list[i].type == JIM_TT_EOL)
            count++;
    int linenr = script->firstline = tokenlist->list[0].line;
    struct ScriptToken *token = script->token = (ScriptToken *)Jim_Alloc(sizeof(ScriptToken) * count);
    // This is the first token for the current command
    ScriptToken *linefirst = token++;
    for (i = 0; i < tokenlist->count; ) {
        // Skip any leading separators
        while (tokenlist->list[i].type == JIM_TT_SEP)
            i++;
        // Look ahead to find out how many tokens make up the next word
        int wordtokens = JimCountWordTokens(tokenlist->list + i);
        if (wordtokens == 0) {
            // None, so at end of line
            if (lineargs) {
                linefirst->type = JIM_TT_LINE;
                linefirst->objPtr = JimNewScriptLineObj(interp, lineargs, linenr);
                Jim_IncrRefCount(linefirst->objPtr);
                // Reset for new line
                lineargs = 0;
                linefirst = token++;
            }
            i++;
            continue;
        }
        else if (wordtokens != 1) {
            // More than 1, or {*}, so insert a WORD token
            token->type = JIM_TT_WORD;
            token->objPtr = Jim_NewIntObj(interp, wordtokens);
            Jim_IncrRefCount(token->objPtr);
            token++;
            if (wordtokens < 0) {
                // Skip the expand token
                i++;
                wordtokens = -wordtokens - 1;
                lineargs--;
            }
        }
        // First real token on the line, so record the line number
        if (lineargs == 0)
            linenr = tokenlist->list[i].line;
        lineargs++;
        // Add each non-separator word token to the line
        while (wordtokens--) {
            const ParseToken *t = &tokenlist->list[i++];
            token->type = t->type;
            token->objPtr = JimMakeScriptObj(interp, t);
            Jim_IncrRefCount(token->objPtr);
            // Every object is initially a string of type 'source', but the internal type may be specialized during execution of the script.
            JimSetSourceInfo(interp, token->objPtr, script->fileNameObj, t->line);
            token++;
        }
    }
    if (lineargs == 0)
        token--;
    script->len = (int)(token - script->token);
    JimPanic(script->len >= count, "allocated script array is too short");
#ifdef DEBUG_SHOW_SCRIPT
    printf("==== Script (%s) ====\n", Jim_String(script->fileNameObj));
    for (i = 0; i < script->len; i++) {
        const ScriptToken *t = &script->token[i];
        printf("[%2d] %s %s\n", i, jim_tt_name(t->type), Jim_String(t->objPtr));
    }
#endif
}

// Sets an appropriate error message for a missing script/expression terminator.
// Returns JIM_ERROR if 'ch' represents an unmatched/missing character.
// Note that a trailing backslash is not considered to be an error.
static __device__ int JimParseCheckMissing(Jim_Interp *interp, int ch) {
    const char *msg;
    switch (ch) {
    case '\\':
    case ' ':
        return JIM_OK;
    case '[':
        msg = "unmatched \"[\"";
        break;
    case '{':
        msg = "missing close-brace";
        break;
    case '"':
    default:
        msg = "missing quote";
        break;
    }
    Jim_SetResultString(interp, msg, -1);
    return JIM_ERROR;
}

// Similar to ScriptObjAddTokens(), but for subst objects. (A short illustrative aside precedes the function below.)
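
// ---- Illustrative aside (not part of Jim): reporting incomplete scripts ----
// A minimal sketch combining Jim_ScriptIsComplete() (further above) with
// JimParseCheckMissing(): e.g. for "puts {hello" the state char is '{' and the
// interp result becomes "missing close-brace". JimDemoCheckComplete is a
// hypothetical name used only for this sketch.
static __device__ int JimDemoCheckComplete(Jim_Interp *interp, const char *script) {
    char state;
    if (Jim_ScriptIsComplete(script, (int)strlen(script), &state))
        return JIM_OK;
    return JimParseCheckMissing(interp, state);     // sets the error message
}
// ---- end of aside; SubstObjAddTokens follows ----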
static __device__ void SubstObjAddTokens(Jim_Interp *interp, struct ScriptObj *script, ParseTokenList *tokenlist) { int i; struct ScriptToken *token = script->token = (ScriptToken *)Jim_Alloc(sizeof(ScriptToken) * tokenlist->count); for (i = 0; i < tokenlist->count; i++) { const ParseToken *t = &tokenlist->list[i]; // Create a token for 't' token->type = t->type; token->objPtr = JimMakeScriptObj(interp, t); Jim_IncrRefCount(token->objPtr); token++; } script->len = i; } // This method takes the string representation of an object as a Tcl script, and generates the pre-parsed internal representation of the script. // On parse error, sets an error message and returns JIM_ERROR (Note: the object is still converted to a script, even if an error occurs) static __device__ void JimSetScriptFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr) { // Try to get information about filename / line number int line = 1; if (objPtr->typePtr == &_sourceObjType) line = objPtr->internalRep.sourceValue.lineNumber; // Initially parse the script into tokens (in tokenlist) ParseTokenList tokenlist; ScriptTokenListInit(&tokenlist); int scriptTextLen; const char *scriptText = Jim_GetString(objPtr, &scriptTextLen); struct JimParserCtx parser; JimParserInit(&parser, scriptText, scriptTextLen, line); while (!parser.eof) { JimParseScript(&parser); ScriptAddToken(&tokenlist, parser.tstart, (int)(parser.tend - parser.tstart) + 1, parser.tt, parser.tline); } // Add a final EOF token ScriptAddToken(&tokenlist, scriptText + scriptTextLen, 0, JIM_TT_EOF, 0); // Create the "real" script tokens from the parsed tokens struct ScriptObj *script = (ScriptObj *)Jim_Alloc(sizeof(*script)); memset(script, 0, sizeof(*script)); script->inUse = 1; script->fileNameObj = (objPtr->typePtr == &_sourceObjType ? objPtr->internalRep.sourceValue.fileNameObj : interp->emptyObj); Jim_IncrRefCount(script->fileNameObj); script->missing = parser.missing.ch; script->linenr = parser.missing.line; ScriptObjAddTokens(interp, script, &tokenlist); // No longer need the token list ScriptTokenListFree(&tokenlist); // Free the old internal rep and set the new one. Jim_FreeIntRep(interp, objPtr); Jim_SetIntRepPtr(objPtr, script); objPtr->typePtr = &_scriptObjType; } static __device__ void JimAddErrorToStack(Jim_Interp *interp, ScriptObj *script); // Returns the parsed script. Note that if there is any possibility that the script is not valid, call JimScriptValid() to check __device__ ScriptObj *JimGetScript(Jim_Interp *interp, Jim_Obj *objPtr) { if (objPtr == interp->emptyObj) objPtr = interp->nullScriptObj; // Avoid converting emptyObj to a script. use nullScriptObj instead. if (objPtr->typePtr != &_scriptObjType || ((struct ScriptObj *)Jim_GetIntRepPtr(objPtr))->substFlags) JimSetScriptFromAny(interp, objPtr); return (ScriptObj *)Jim_GetIntRepPtr(objPtr); } // Returns 1 if the script is valid (parsed ok), otherwise returns 0 and leaves an error message in the interp result. 
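
// ---- Illustrative aside (not part of Jim): protecting against shimmering ----
// A minimal sketch of the usual evaluation pattern: fetch the parsed script and
// bump inUse so FreeScriptInternalRep() will not free it even if the object
// shimmers to another type mid-evaluation. JimDemoHoldScript is a hypothetical
// name used only for this sketch; the caller must drop inUse again afterwards.
static __device__ ScriptObj *JimDemoHoldScript(Jim_Interp *interp, Jim_Obj *objPtr) {
    ScriptObj *script = JimGetScript(interp, objPtr);
    script->inUse++;    // paired with a later decrement/free by the caller
    return script;
}
// ---- end of aside; JimScriptValid follows ----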
static __device__ int JimScriptValid(Jim_Interp *interp, ScriptObj *script)
{
    if (JimParseCheckMissing(interp, script->missing) == JIM_ERROR) {
        JimAddErrorToStack(interp, script);
        return 0;
    }
    return 1;
}

#pragma endregion

// -----------------------------------------------------------------------------
// Commands
// -----------------------------------------------------------------------------
#pragma region Commands

static __device__ void JimIncrCmdRefCount(Jim_Cmd *cmdPtr)
{
    cmdPtr->inUse++;
}

static __device__ void JimDecrCmdRefCount(Jim_Interp *interp, Jim_Cmd *cmdPtr)
{
    if (--cmdPtr->inUse == 0) {
        if (cmdPtr->isproc) {
            Jim_DecrRefCount(interp, cmdPtr->u.proc.argListObjPtr);
            Jim_DecrRefCount(interp, cmdPtr->u.proc.bodyObjPtr);
            Jim_DecrRefCount(interp, cmdPtr->u.proc.nsObj);
            if (cmdPtr->u.proc.staticVars) {
                Jim_FreeHashTable(cmdPtr->u.proc.staticVars);
                Jim_Free(cmdPtr->u.proc.staticVars);
            }
        }
        // native (C)
        else if (cmdPtr->u.native.delProc)
            cmdPtr->u.native.delProc(cmdPtr->u.native.privData, interp);
        // Delete any pushed command too
        if (cmdPtr->prevCmd)
            JimDecrCmdRefCount(interp, cmdPtr->prevCmd);
        Jim_Free(cmdPtr);
    }
}

// Variables HashTable Type.
// Keys are dynamically allocated strings, Values are Jim_Var structures.
static __device__ void JimVariablesHTValDestructor(void *interp, void *val)
{
    Jim_DecrRefCount((Jim_Interp *)interp, ((Jim_Var *)val)->objPtr);
    Jim_Free(val);
}

__constant__ static const Jim_HashTableType JimVariablesHashTableType = {
    JimStringCopyHTHashFunction,    // hash function
    JimStringCopyHTDup,             // key dup
    NULL,                           // val dup
    JimStringCopyHTKeyCompare,      // key compare
    JimStringCopyHTKeyDestructor,   // key destructor
    JimVariablesHTValDestructor     // val destructor
};

// Commands HashTable Type.
// Keys are dynamically allocated strings, Values are Jim_Cmd structures.
static __device__ void JimCommandsHT_ValDestructor(void *interp, void *val)
{
    JimDecrCmdRefCount((Jim_Interp *)interp, (Jim_Cmd *)val);
}

__constant__ static const Jim_HashTableType JimCommandsHashTableType = {
    JimStringCopyHTHashFunction,    // hash function
    JimStringCopyHTDup,             // key dup
    NULL,                           // val dup
    JimStringCopyHTKeyCompare,      // key compare
    JimStringCopyHTKeyDestructor,   // key destructor
    JimCommandsHT_ValDestructor     // val destructor
};

// ------------------------- Commands related functions ---------------------

#ifdef jim_ext_namespace
// Returns the "unscoped" version of the given namespace. That is, the fully qualified name without the leading ::
// The returned value is either nsObj, or an object with a zero ref count.
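// For example "::foo::bar" is unscoped to "foo::bar", while a plain "bar" defined inside
// namespace "foo" qualifies to "foo::bar".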
static __device__ Jim_Obj *JimQualifyNameObj(Jim_Interp *interp, Jim_Obj *nsObj) { const char *name = Jim_String(nsObj); if (name[0] == ':' && name[1] == ':') { while (*++name == ':') { } // This command is being defined in the global namespace nsObj = Jim_NewStringObj(interp, name, -1); } else if (Jim_Length(interp->framePtr->nsObj)) { // This command is being defined in a non-global namespace nsObj = Jim_DuplicateObj(interp, interp->framePtr->nsObj); Jim_AppendStrings(interp, nsObj, "::", name, NULL); } return nsObj; } __device__ Jim_Obj *Jim_MakeGlobalNamespaceName(Jim_Interp *interp, Jim_Obj *nameObjPtr) { const char *name = Jim_String(nameObjPtr); if (name[0] == ':' && name[1] == ':') return nameObjPtr; Jim_IncrRefCount(nameObjPtr); Jim_Obj *resultObj = Jim_NewStringObj(interp, "::", -1); Jim_AppendObj(interp, resultObj, nameObjPtr); Jim_DecrRefCount(interp, nameObjPtr); return resultObj; } // An efficient version of JimQualifyNameObj() where the name is available (and needed) as a 'const char *'. // Avoids creating an object if not necessary. The object stored in *objPtrPtr should be disposed of with JimFreeQualifiedName() after use. static __device__ const char *JimQualifyName(Jim_Interp *interp, const char *name, Jim_Obj **objPtrPtr) { Jim_Obj *objPtr = interp->emptyObj; if (name[0] == ':' && name[1] == ':') while (*++name == ':') { } // This command is being defined in the global namespace else if (Jim_Length(interp->framePtr->nsObj)) { // This command is being defined in a non-global namespace objPtr = Jim_DuplicateObj(interp, interp->framePtr->nsObj); Jim_AppendStrings(interp, objPtr, "::", name, NULL); name = Jim_String(objPtr); } Jim_IncrRefCount(objPtr); *objPtrPtr = objPtr; return name; } #define JimFreeQualifiedName(INTERP, OBJ) Jim_DecrRefCount((INTERP), (OBJ)) #else // We can be more efficient in the no-namespace case #define JimQualifyName(INTERP, NAME, DUMMY) ((NAME)[0] == ':' && (NAME)[1] == ':' ? (NAME) + 2 : (NAME)) #define JimFreeQualifiedName(INTERP, DUMMY) (void)(DUMMY) __device__ Jim_Obj *Jim_MakeGlobalNamespaceName(Jim_Interp *interp, Jim_Obj *nameObjPtr) { return nameObjPtr; } #endif static __device__ int JimCreateCommand(Jim_Interp *interp, const char *name, Jim_Cmd *cmd) { // It may already exist, so we try to delete the old one. Note that reference count means that it won't be deleted yet if it exists in the call stack. // BUT, if 'local' is in force, instead of deleting the existing proc, we stash a reference to the old proc here. Jim_HashEntry *he = Jim_FindHashEntry(&interp->commands, name); // There was an old cmd with the same name, so this requires a 'proc epoch' update. // If a procedure with the same name didn't exist there is no need to increment the 'proc epoch' because creation of a new procedure // can never affect existing cached commands. We don't do negative caching. 
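    // (Bumping the proc epoch below invalidates every cached command lookup; see Jim_GetCommand() for the validation side.)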
if (he) Jim_InterpIncrProcEpoch(interp); if (he && interp->local) { // Push this command over the top of the previous one cmd->prevCmd = (Jim_Cmd *)Jim_GetHashEntryVal(he); Jim_SetHashVal(&interp->commands, he, cmd); } else { // Replace the existing command if (he) Jim_DeleteHashEntry(&interp->commands, name); Jim_AddHashEntry(&interp->commands, name, cmd); } return JIM_OK; } __device__ int Jim_CreateCommand(Jim_Interp *interp, const char *cmdNameStr, Jim_CmdProc cmdProc, void *privData, Jim_DelCmdProc delProc) { Jim_Cmd *cmdPtr = (Jim_Cmd *)Jim_Alloc(sizeof(*cmdPtr)); // Store the new details for this command memset(cmdPtr, 0, sizeof(*cmdPtr)); cmdPtr->inUse = 1; cmdPtr->u.native.delProc = delProc; cmdPtr->u.native.cmdProc = cmdProc; cmdPtr->u.native.privData = privData; JimCreateCommand(interp, cmdNameStr, cmdPtr); return JIM_OK; } static __device__ int JimCreateProcedureStatics(Jim_Interp *interp, Jim_Cmd *cmdPtr, Jim_Obj *staticsListObjPtr) { int len = Jim_ListLength(interp, staticsListObjPtr); if (len == 0) return JIM_OK; cmdPtr->u.proc.staticVars = (Jim_HashTable *)Jim_Alloc(sizeof(Jim_HashTable)); Jim_InitHashTable(cmdPtr->u.proc.staticVars, &JimVariablesHashTableType, interp); for (int i = 0; i < len; i++) { Jim_Obj *objPtr = Jim_ListGetIndex(interp, staticsListObjPtr, i); // Check if it's composed of two elements. int subLen = Jim_ListLength(interp, objPtr); if (subLen == 1 || subLen == 2) { // Try to get the variable value from the current environment. Jim_Obj *initObjPtr; Jim_Obj *nameObjPtr = Jim_ListGetIndex(interp, objPtr, 0); if (subLen == 1) { initObjPtr = Jim_GetVariable(interp, nameObjPtr, JIM_NONE); if (initObjPtr == NULL) { Jim_SetResultFormatted(interp, "variable for initialization of static \"%#s\" not found in the local context", nameObjPtr); return JIM_ERROR; } } else initObjPtr = Jim_ListGetIndex(interp, objPtr, 1); if (JimValidName(interp, "static variable", nameObjPtr) != JIM_OK) return JIM_ERROR; Jim_Var *varPtr = (Jim_Var *)Jim_Alloc(sizeof(*varPtr)); varPtr->objPtr = initObjPtr; Jim_IncrRefCount(initObjPtr); varPtr->linkFramePtr = NULL; if (Jim_AddHashEntry(cmdPtr->u.proc.staticVars, Jim_String(nameObjPtr), varPtr) != JIM_OK) { Jim_SetResultFormatted(interp, "static variable name \"%#s\" duplicated in statics list", nameObjPtr); Jim_DecrRefCount(interp, initObjPtr); Jim_Free(varPtr); return JIM_ERROR; } } else { Jim_SetResultFormatted(interp, "too many fields in static specifier \"%#s\"", objPtr); return JIM_ERROR; } } return JIM_OK; } static __device__ void JimUpdateProcNamespace(Jim_Interp *interp, Jim_Cmd *cmdPtr, const char *cmdname) { #ifdef jim_ext_namespace if (cmdPtr->isproc) { const char *pt = strrchr((char *)cmdname, ':'); // XXX: Really need JimNamespaceSplit() if (pt && pt != cmdname && pt[-1] == ':') { Jim_DecrRefCount(interp, cmdPtr->u.proc.nsObj); cmdPtr->u.proc.nsObj = Jim_NewStringObj(interp, cmdname, (int)(pt - cmdname - 1)); Jim_IncrRefCount(cmdPtr->u.proc.nsObj); // This commands shadows a global command, so a proc epoch update is required if (Jim_FindHashEntry(&interp->commands, pt + 1)) Jim_InterpIncrProcEpoch(interp); } } #endif } static __device__ Jim_Cmd *JimCreateProcedureCmd(Jim_Interp *interp, Jim_Obj *argListObjPtr, Jim_Obj *staticsListObjPtr, Jim_Obj *bodyObjPtr, Jim_Obj *nsObj) { int argListLen = Jim_ListLength(interp, argListObjPtr); // Allocate space for both the command pointer and the arg list Jim_Cmd *cmdPtr = (Jim_Cmd *)Jim_Alloc(sizeof(*cmdPtr) + sizeof(struct Jim_Cmd::a_::c_::Jim_ProcArg) * argListLen); memset(cmdPtr, 0, 
sizeof(*cmdPtr));
    cmdPtr->inUse = 1;
    cmdPtr->isproc = 1;
    cmdPtr->u.proc.argListObjPtr = argListObjPtr;
    cmdPtr->u.proc.argListLen = argListLen;
    cmdPtr->u.proc.bodyObjPtr = bodyObjPtr;
    cmdPtr->u.proc.argsPos = -1;
    cmdPtr->u.proc.arglist = (struct Jim_Cmd::a_::c_::Jim_ProcArg *)(cmdPtr + 1);
    cmdPtr->u.proc.nsObj = nsObj ? nsObj : interp->emptyObj;
    Jim_IncrRefCount(argListObjPtr);
    Jim_IncrRefCount(bodyObjPtr);
    Jim_IncrRefCount(cmdPtr->u.proc.nsObj);
    // Create the statics hash table.
    if (staticsListObjPtr && JimCreateProcedureStatics(interp, cmdPtr, staticsListObjPtr) != JIM_OK)
        goto err;
    // Parse the args out into arglist, validating as we go.
    // Examine the argument list for default parameters and 'args'.
    for (int i = 0; i < argListLen; i++) {
        // Examine a parameter
        Jim_Obj *argPtr = Jim_ListGetIndex(interp, argListObjPtr, i);
        int len = Jim_ListLength(interp, argPtr);
        if (len == 0) {
            Jim_SetResultString(interp, "argument with no name", -1);
err:
            JimDecrCmdRefCount(interp, cmdPtr);
            return NULL;
        }
        if (len > 2) {
            Jim_SetResultFormatted(interp, "too many fields in argument specifier \"%#s\"", argPtr);
            goto err;
        }
        Jim_Obj *nameObjPtr;
        Jim_Obj *defaultObjPtr;
        if (len == 2) {
            // Optional parameter
            nameObjPtr = Jim_ListGetIndex(interp, argPtr, 0);
            defaultObjPtr = Jim_ListGetIndex(interp, argPtr, 1);
        }
        else {
            // Required parameter
            nameObjPtr = argPtr;
            defaultObjPtr = NULL;
        }
        if (Jim_CompareStringImmediate(interp, nameObjPtr, "args")) {
            if (cmdPtr->u.proc.argsPos >= 0) {
                Jim_SetResultString(interp, "'args' specified more than once", -1);
                goto err;
            }
            cmdPtr->u.proc.argsPos = i;
        }
        else {
            if (len == 2)
                cmdPtr->u.proc.optArity++;
            else
                cmdPtr->u.proc.reqArity++;
        }
        cmdPtr->u.proc.arglist[i].nameObjPtr = nameObjPtr;
        cmdPtr->u.proc.arglist[i].defaultObjPtr = defaultObjPtr;
    }
    return cmdPtr;
}

__device__ int Jim_DeleteCommand(Jim_Interp *interp, const char *name)
{
    int ret = JIM_OK;
    Jim_Obj *qualifiedNameObj;
    const char *qualname = JimQualifyName(interp, name, &qualifiedNameObj);
    if (Jim_DeleteHashEntry(&interp->commands, qualname) == JIM_ERROR) {
        Jim_SetResultFormatted(interp, "can't delete \"%s\": command doesn't exist", name);
        ret = JIM_ERROR;
    }
    else
        Jim_InterpIncrProcEpoch(interp);
    JimFreeQualifiedName(interp, qualifiedNameObj);
    return ret;
}

__device__ int Jim_RenameCommand(Jim_Interp *interp, const char *oldName, const char *newName)
{
    int ret = JIM_ERROR;
    if (newName[0] == 0)
        return Jim_DeleteCommand(interp, oldName);
    Jim_Obj *qualifiedOldNameObj;
    Jim_Obj *qualifiedNewNameObj;
    const char *fqold = JimQualifyName(interp, oldName, &qualifiedOldNameObj);
    const char *fqnew = JimQualifyName(interp, newName, &qualifiedNewNameObj);
    // Does it exist?
Jim_HashEntry *he = Jim_FindHashEntry(&interp->commands, fqold); if (he == NULL) Jim_SetResultFormatted(interp, "can't rename \"%s\": command doesn't exist", oldName); else if (Jim_FindHashEntry(&interp->commands, fqnew)) Jim_SetResultFormatted(interp, "can't rename to \"%s\": command already exists", newName); else { // Add the new name first Jim_Cmd *cmdPtr = (Jim_Cmd *)Jim_GetHashEntryVal(he); JimIncrCmdRefCount(cmdPtr); JimUpdateProcNamespace(interp, cmdPtr, fqnew); Jim_AddHashEntry(&interp->commands, fqnew, cmdPtr); // Now remove the old name Jim_DeleteHashEntry(&interp->commands, fqold); // Increment the epoch Jim_InterpIncrProcEpoch(interp); ret = JIM_OK; } JimFreeQualifiedName(interp, qualifiedOldNameObj); JimFreeQualifiedName(interp, qualifiedNewNameObj); return ret; } #pragma endregion // ----------------------------------------------------------------------------- // Command object // ----------------------------------------------------------------------------- #pragma region Command object static __device__ void FreeCommandInternalRep(Jim_Interp *interp, Jim_Obj *objPtr) { Jim_DecrRefCount(interp, objPtr->internalRep.cmdValue.nsObj); } static __device__ void DupCommandInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr) { dupPtr->internalRep.cmdValue = srcPtr->internalRep.cmdValue; dupPtr->typePtr = srcPtr->typePtr; Jim_IncrRefCount(dupPtr->internalRep.cmdValue.nsObj); } __constant__ static const Jim_ObjType _commandObjType = { "command", FreeCommandInternalRep, DupCommandInternalRep, NULL, JIM_TYPE_REFERENCES, }; // This function returns the command structure for the command name stored in objPtr. It tries to specialize the objPtr to contain // a cached info instead to perform the lookup into the hash table every time. The information cached may not be uptodate, in such // a case the lookup is performed and the cache updated. // Respects the 'upcall' setting __device__ Jim_Cmd *Jim_GetCommand(Jim_Interp *interp, Jim_Obj *objPtr, int flags) { Jim_Cmd *cmd; // In order to be valid, the proc epoch must match and the lookup must have occurred in the same namespace if (objPtr->typePtr != &_commandObjType || objPtr->internalRep.cmdValue.procEpoch != interp->procEpoch #ifdef jim_ext_namespace || !Jim_StringEqObj(objPtr->internalRep.cmdValue.nsObj, interp->framePtr->nsObj) #endif ) { // Not cached or out of date, so lookup // Do we need to try the local namespace? const char *name = Jim_String(objPtr); Jim_HashEntry *he; if (name[0] == ':' && name[1] == ':') while (*++name == ':') { } #ifdef jim_ext_namespace else if (Jim_Length(interp->framePtr->nsObj)) { // This command is being defined in a non-global namespace Jim_Obj *nameObj = Jim_DuplicateObj(interp, interp->framePtr->nsObj); Jim_AppendStrings(interp, nameObj, "::", name, NULL); he = Jim_FindHashEntry(&interp->commands, Jim_String(nameObj)); Jim_FreeNewObj(interp, nameObj); if (he) goto found; } #endif // Lookup in the global namespace he = Jim_FindHashEntry(&interp->commands, name); if (he == NULL) { if (flags & JIM_ERRMSG) Jim_SetResultFormatted(interp, "invalid command name \"%#s\"", objPtr); return NULL; } #ifdef jim_ext_namespace found: #endif cmd = (Jim_Cmd *)Jim_GetHashEntryVal(he); // Free the old internal repr and set the new one. 
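        // (The procEpoch and namespace recorded below let the next lookup validate this cached intrep without touching the hash table.)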
Jim_FreeIntRep(interp, objPtr); objPtr->typePtr = &_commandObjType; objPtr->internalRep.cmdValue.procEpoch = interp->procEpoch; objPtr->internalRep.cmdValue.cmdPtr = cmd; objPtr->internalRep.cmdValue.nsObj = interp->framePtr->nsObj; Jim_IncrRefCount(interp->framePtr->nsObj); } else cmd = objPtr->internalRep.cmdValue.cmdPtr; while (cmd->u.proc.upcall) cmd = cmd->prevCmd; return cmd; } #pragma endregion // ----------------------------------------------------------------------------- // Variables // ----------------------------------------------------------------------------- // DEG: pragma for more? // ----------------------------------------------------------------------------- // Variable object // ----------------------------------------------------------------------------- #pragma region Variable object #define JIM_DICT_SUGAR 100 // Only returned by SetVariableFromAny() static __device__ int SetVariableFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr); static __device__ const Jim_ObjType _variableObjType = { "variable", NULL, NULL, NULL, JIM_TYPE_REFERENCES, }; // Check that the name does not contain embedded nulls. Variable and procedure names are manipulated as null terminated strings, so don't allow names with embedded nulls. static __device__ int JimValidName(Jim_Interp *interp, const char *type, Jim_Obj *nameObjPtr) { // Variable names and proc names can't contain embedded nulls if (nameObjPtr->typePtr != &_variableObjType) { int len; const char *str = Jim_GetString(nameObjPtr, &len); if (memchr(str, '\0', len)) { Jim_SetResultFormatted(interp, "%s name contains embedded null", type); return JIM_ERROR; } } return JIM_OK; } // This method should be called only by the variable API. It returns JIM_OK on success (variable already exists), // JIM_ERROR if it does not exist, JIM_DICT_SUGAR if it's not a variable name, but syntax glue for [dict] i.e. the last character is ')' static __device__ int SetVariableFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr) { // Check if the object is already an uptodate variable Jim_CallFrame *framePtr; if (objPtr->typePtr == &_variableObjType) { framePtr = (objPtr->internalRep.varValue.global ? interp->topFramePtr : interp->framePtr); if (objPtr->internalRep.varValue.callFrameId == framePtr->id) return JIM_OK; // nothing to do // Need to re-resolve the variable in the updated callframe } else if (objPtr->typePtr == &_dictSubstObjType) return JIM_DICT_SUGAR; else if (JimValidName(interp, "variable", objPtr) != JIM_OK) return JIM_ERROR; int len; const char *varName = Jim_GetString(objPtr, &len); // Make sure it's not syntax glue to get/set dict. if (len && varName[len - 1] == ')' && strchr(varName, '(') != NULL) return JIM_DICT_SUGAR; int global; if (varName[0] == ':' && varName[1] == ':') { while (*++varName == ':') { } global = 1; framePtr = interp->topFramePtr; } else { global = 0; framePtr = interp->framePtr; } // Resolve this name in the variables hash table Jim_HashEntry *he = Jim_FindHashEntry(&framePtr->vars, varName); if (he == NULL) { if (!global && framePtr->staticVars) he = Jim_FindHashEntry(framePtr->staticVars, varName); // Try with static vars. if (he == NULL) return JIM_ERROR; } // Free the old internal repr and set the new one. 
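    // (The callFrameId recorded below is compared against the frame's id on the next use; Jim_UnsetVariable() bumps that id to invalidate stale caches.)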
Jim_FreeIntRep(interp, objPtr); objPtr->typePtr = &_variableObjType; objPtr->internalRep.varValue.callFrameId = framePtr->id; objPtr->internalRep.varValue.varPtr = (Jim_Var *)Jim_GetHashEntryVal(he); objPtr->internalRep.varValue.global = global; return JIM_OK; } // -------------------- Variables related functions ------------------------- static __device__ int JimDictSugarSet(Jim_Interp *interp, Jim_Obj *ObjPtr, Jim_Obj *valObjPtr); static __device__ Jim_Obj *JimDictSugarGet(Jim_Interp *interp, Jim_Obj *ObjPtr, int flags); static __device__ Jim_Var *JimCreateVariable(Jim_Interp *interp, Jim_Obj *nameObjPtr, Jim_Obj *valObjPtr) { // New variable to create Jim_Var *var = (Jim_Var *)Jim_Alloc(sizeof(*var)); var->objPtr = valObjPtr; Jim_IncrRefCount(valObjPtr); var->linkFramePtr = NULL; Jim_CallFrame *framePtr; int global; const char *name = Jim_String(nameObjPtr); if (name[0] == ':' && name[1] == ':') { while (*++name == ':') { } framePtr = interp->topFramePtr; global = 1; } else { framePtr = interp->framePtr; global = 0; } // Insert the new variable Jim_AddHashEntry(&framePtr->vars, name, var); // Make the object int rep a variable Jim_FreeIntRep(interp, nameObjPtr); nameObjPtr->typePtr = &_variableObjType; nameObjPtr->internalRep.varValue.callFrameId = framePtr->id; nameObjPtr->internalRep.varValue.varPtr = var; nameObjPtr->internalRep.varValue.global = global; return var; } // For now that's dummy. Variables lookup should be optimized in many ways, with caching of lookups, and possibly with a table of pre-allocated vars in every CallFrame for local vars. // All the caching should also have an 'epoch' mechanism similar to the one used by Tcl for procedures lookup caching. __device__ int Jim_SetVariable(Jim_Interp *interp, Jim_Obj *nameObjPtr, Jim_Obj *valObjPtr, int flags) { Jim_CallFrame *savedFramePtr; int global = (flags & JIMGLOBAL_); if (global) { savedFramePtr = interp->framePtr; interp->framePtr = interp->topFramePtr; } flags &= ~JIMGLOBAL_; switch (SetVariableFromAny(interp, nameObjPtr)) { case JIM_DICT_SUGAR: if (global) interp->framePtr = savedFramePtr; return JimDictSugarSet(interp, nameObjPtr, valObjPtr); case JIM_ERROR: if (JimValidName(interp, "variable", nameObjPtr) != JIM_OK) { if (global) interp->framePtr = savedFramePtr; return JIM_ERROR; } JimCreateVariable(interp, nameObjPtr, valObjPtr); break; case JIM_OK: Jim_Var *var = nameObjPtr->internalRep.varValue.varPtr; if (var->linkFramePtr == NULL) { Jim_IncrRefCount(valObjPtr); Jim_DecrRefCount(interp, var->objPtr); var->objPtr = valObjPtr; } // else handle the link else { Jim_CallFrame *savedCallFrame = interp->framePtr; interp->framePtr = var->linkFramePtr; int err = Jim_SetVariable(interp, var->objPtr, valObjPtr, 0); interp->framePtr = savedCallFrame; if (err != JIM_OK) { if (global) interp->framePtr = savedFramePtr; return err; } } } if (global) interp->framePtr = savedFramePtr; return JIM_OK; } __device__ int Jim_SetVariableStr(Jim_Interp *interp, const char *name, Jim_Obj *objPtr, int flags) { Jim_Obj *nameObjPtr = Jim_NewStringObj(interp, name, -1); Jim_IncrRefCount(nameObjPtr); int result = Jim_SetVariable(interp, nameObjPtr, objPtr, flags); Jim_DecrRefCount(interp, nameObjPtr); return result; } __device__ int Jim_SetVariableStrWithStr(Jim_Interp *interp, const char *name, const char *val, int flags) { Jim_Obj *nameObjPtr = Jim_NewStringObj(interp, name, -1); Jim_Obj *valObjPtr = Jim_NewStringObj(interp, val, -1); Jim_IncrRefCount(nameObjPtr); Jim_IncrRefCount(valObjPtr); int result = Jim_SetVariable(interp, 
nameObjPtr, valObjPtr, flags); Jim_DecrRefCount(interp, nameObjPtr); Jim_DecrRefCount(interp, valObjPtr); return result; } __device__ int Jim_SetVariableLink(Jim_Interp *interp, Jim_Obj *nameObjPtr, Jim_Obj *targetNameObjPtr, Jim_CallFrame *targetCallFrame) { // Check for an existing variable or link Jim_Var *varPtr; switch (SetVariableFromAny(interp, nameObjPtr)) { case JIM_DICT_SUGAR: // XXX: This message seem unnecessarily verbose, but it matches Tcl Jim_SetResultFormatted(interp, "bad variable name \"%#s\": upvar won't create a scalar variable that looks like an array element", nameObjPtr); return JIM_ERROR; case JIM_OK: varPtr = nameObjPtr->internalRep.varValue.varPtr; if (varPtr->linkFramePtr == NULL) { Jim_SetResultFormatted(interp, "variable \"%#s\" already exists", nameObjPtr); return JIM_ERROR; } // It exists, but is a link, so first delete the link varPtr->linkFramePtr = NULL; break; } // Resolve the call frames for both variables. XXX: SetVariableFromAny() already did this! const char *varName = Jim_String(nameObjPtr); Jim_CallFrame *framePtr; if (varName[0] == ':' && varName[1] == ':') { while (*++varName == ':') { } // Linking a global var does nothing framePtr = interp->topFramePtr; } else framePtr = interp->framePtr; const char *targetName = Jim_String(targetNameObjPtr); if (targetName[0] == ':' && targetName[1] == ':') { while (*++targetName == ':') { } targetNameObjPtr = Jim_NewStringObj(interp, targetName, -1); targetCallFrame = interp->topFramePtr; } Jim_IncrRefCount(targetNameObjPtr); if (framePtr->level < targetCallFrame->level) { Jim_SetResultFormatted(interp, "bad variable name \"%#s\": upvar won't create namespace variable that refers to procedure variable", nameObjPtr); Jim_DecrRefCount(interp, targetNameObjPtr); return JIM_ERROR; } // Check for cycles if (framePtr == targetCallFrame) { Jim_Obj *objPtr = targetNameObjPtr; // Cycles are only possible with 'uplevel 0' while (1) { if (strcmp(Jim_String(objPtr), varName) == 0) { Jim_SetResultString(interp, "can't upvar from variable to itself", -1); Jim_DecrRefCount(interp, targetNameObjPtr); return JIM_ERROR; } if (SetVariableFromAny(interp, objPtr) != JIM_OK) break; varPtr = objPtr->internalRep.varValue.varPtr; if (varPtr->linkFramePtr != targetCallFrame) break; objPtr = varPtr->objPtr; } } // Perform the binding Jim_SetVariable(interp, nameObjPtr, targetNameObjPtr); // We are now sure 'nameObjPtr' type is variableObjType nameObjPtr->internalRep.varValue.varPtr->linkFramePtr = targetCallFrame; Jim_DecrRefCount(interp, targetNameObjPtr); return JIM_OK; } // Return the Jim_Obj pointer associated with a variable name, or NULL if the variable was not found in the current context. // The same optimization discussed in the comment to the 'SetVariable' function should apply here. // // If JIM_UNSHARED is set and the variable is an array element (dict sugar) in a dictionary which is shared, the array variable value is duplicated first. // This allows the array element to be updated (e.g. append, lappend) without affecting other references to the dictionary. 
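// A minimal usage sketch (hypothetical; disabled with #if 0 so it does not affect the build):
// reads variable "foo" via the string helper defined just below, falling back to the empty
// object when it is unset.
#if 0
static __device__ Jim_Obj *JimGetFooOrEmptyExample(Jim_Interp *interp)
{
    // JIM_NONE: do not leave an error message when "foo" does not exist
    Jim_Obj *objPtr = Jim_GetVariableStr(interp, "foo", JIM_NONE);
    return objPtr ? objPtr : interp->emptyObj;
}
#endif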
__device__ Jim_Obj *Jim_GetVariable(Jim_Interp *interp, Jim_Obj *nameObjPtr, int flags)
{
    Jim_CallFrame *savedFramePtr;
    int global = (flags & JIMGLOBAL_);
    if (global) {
        savedFramePtr = interp->framePtr;
        interp->framePtr = interp->topFramePtr;
    }
    flags &= ~JIMGLOBAL_;
    switch (SetVariableFromAny(interp, nameObjPtr)) {
    case JIM_OK: {
        Jim_Var *varPtr = nameObjPtr->internalRep.varValue.varPtr;
        if (varPtr->linkFramePtr == NULL) {
            if (global)
                interp->framePtr = savedFramePtr;
            return varPtr->objPtr;
        }
        else {
            Jim_Obj *objPtr;
            // The variable is a link? Resolve it.
            Jim_CallFrame *savedCallFrame = interp->framePtr;
            interp->framePtr = varPtr->linkFramePtr;
            objPtr = Jim_GetVariable(interp, varPtr->objPtr, flags);
            interp->framePtr = savedCallFrame;
            if (objPtr) {
                if (global)
                    interp->framePtr = savedFramePtr;
                return objPtr;
            }
            // Error, so fall through to the error message
        }
        break;
    }
    case JIM_DICT_SUGAR:
        // [dict] syntax sugar
        if (global)
            interp->framePtr = savedFramePtr;
        return JimDictSugarGet(interp, nameObjPtr, flags);
    }
    if (flags & JIM_ERRMSG)
        Jim_SetResultFormatted(interp, "can't read \"%#s\": no such variable", nameObjPtr);
    if (global)
        interp->framePtr = savedFramePtr;
    return NULL;
}

__device__ Jim_Obj *Jim_GetVariableStr(Jim_Interp *interp, const char *name, int flags)
{
    Jim_Obj *nameObjPtr = Jim_NewStringObj(interp, name, -1);
    Jim_IncrRefCount(nameObjPtr);
    Jim_Obj *varObjPtr = Jim_GetVariable(interp, nameObjPtr, flags);
    Jim_DecrRefCount(interp, nameObjPtr);
    return varObjPtr;
}

// Unset a variable.
// Note: On success, unset invalidates all the variable objects created in the current call frame by incrementing the call frame id.
__device__ int Jim_UnsetVariable(Jim_Interp *interp, Jim_Obj *nameObjPtr, int flags)
{
    Jim_CallFrame *savedFramePtr;
    int global = (flags & JIMGLOBAL_);
    if (global) {
        savedFramePtr = interp->framePtr;
        interp->framePtr = interp->topFramePtr;
    }
    flags &= ~JIMGLOBAL_;
    int retval = SetVariableFromAny(interp, nameObjPtr);
    if (retval == JIM_DICT_SUGAR) {
        // [dict] syntax sugar.
        if (global)
            interp->framePtr = savedFramePtr;
        return JimDictSugarSet(interp, nameObjPtr, NULL);
    }
    else if (retval == JIM_OK) {
        Jim_Var *varPtr = nameObjPtr->internalRep.varValue.varPtr;
        // If it's a link call UnsetVariable recursively
        Jim_CallFrame *framePtr;
        if (varPtr->linkFramePtr) {
            framePtr = interp->framePtr;
            interp->framePtr = varPtr->linkFramePtr;
            retval = Jim_UnsetVariable(interp, varPtr->objPtr, JIM_NONE);
            interp->framePtr = framePtr;
        }
        else {
            const char *name = Jim_String(nameObjPtr);
            if (nameObjPtr->internalRep.varValue.global) {
                name += 2;
                framePtr = interp->topFramePtr;
            }
            else
                framePtr = interp->framePtr;
            retval = Jim_DeleteHashEntry(&framePtr->vars, name);
            // Change the callframe id, invalidating var lookup caching
            if (retval == JIM_OK)
                framePtr->id = interp->callFrameEpoch++;
        }
    }
    if (retval != JIM_OK && (flags & JIM_ERRMSG))
        Jim_SetResultFormatted(interp, "can't unset \"%#s\": no such variable", nameObjPtr);
    if (global)
        interp->framePtr = savedFramePtr;
    return retval;
}

// ---------- Dict syntax sugar (similar to array Tcl syntax) --------------

// Given a variable name for [dict] operation syntax sugar, this function returns two objects, the first with the name
// of the variable to set, and the second with the respective key. For example "foo(bar)" will return objects with string repr. of "foo" and "bar".
// The returned objects have refcount = 1. The function can't fail.
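// Only the first '(' splits the name and only a single trailing ')' is trimmed, so "a(b(c))"
// yields variable "a" and key "b(c)".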
static __device__ void JimDictSugarParseVarKey(Jim_Interp *interp, Jim_Obj *objPtr, Jim_Obj **varPtrPtr, Jim_Obj **keyPtrPtr) { int len; const char *str = Jim_GetString(objPtr, &len); const char *p = strchr(str, '('); JimPanic(p == NULL, "JimDictSugarParseVarKey() called for non-dict-sugar (%s)", str); Jim_Obj *varObjPtr = Jim_NewStringObj(interp, str, (int)(p - str)); p++; int keyLen = (int)(str + len - p); if (str[len - 1] == ')') keyLen--; // Create the objects with the variable name and key. Jim_Obj *keyObjPtr = Jim_NewStringObj(interp, p, keyLen); Jim_IncrRefCount(varObjPtr); Jim_IncrRefCount(keyObjPtr); *varPtrPtr = varObjPtr; *keyPtrPtr = keyObjPtr; } // Helper of Jim_SetVariable() to deal with dict-syntax variable names. Also used by Jim_UnsetVariable() with valObjPtr = NULL. static __device__ int JimDictSugarSet(Jim_Interp *interp, Jim_Obj *objPtr, Jim_Obj *valObjPtr) { SetDictSubstFromAny(interp, objPtr); int err = Jim_SetDictKeysVector(interp, objPtr->internalRep.dictSubstValue.varNameObjPtr, &objPtr->internalRep.dictSubstValue.indexObjPtr, 1, valObjPtr, JIM_MUSTEXIST); if (err == JIM_OK) // Don't keep an extra ref to the result Jim_ResetResult(interp); else { // Better error message for unset a(2) where a exists but a(2) doesn't if (!valObjPtr) { if (Jim_GetVariable(interp, objPtr->internalRep.dictSubstValue.varNameObjPtr, JIM_NONE)) { Jim_SetResultFormatted(interp, "can't unset \"%#s\": no such element in array", objPtr); return err; } } // Make the error more informative and Tcl-compatible Jim_SetResultFormatted(interp, "can't %s \"%#s\": variable isn't array", (valObjPtr ? "set" : "unset"), objPtr); } return err; } // Expands the array variable (dict sugar) and returns the result, or NULL on error. // If JIM_UNSHARED is set and the dictionary is shared, it will be duplicated and stored back to the variable before expansion. static __device__ Jim_Obj *JimDictExpandArrayVariable(Jim_Interp *interp, Jim_Obj *varObjPtr, Jim_Obj *keyObjPtr, int flags) { Jim_Obj *dictObjPtr = Jim_GetVariable(interp, varObjPtr, JIM_ERRMSG); if (!dictObjPtr) return NULL; Jim_Obj *resObjPtr = NULL; int ret = Jim_DictKey(interp, dictObjPtr, keyObjPtr, &resObjPtr, JIM_NONE); if (ret != JIM_OK) Jim_SetResultFormatted(interp, "can't read \"%#s(%#s)\": %s array", varObjPtr, keyObjPtr, ret < 0 ? 
"variable isn't" : "no such element in"); // Update the variable to have an unshared copy else if ((flags & JIM_UNSHARED) && Jim_IsShared(dictObjPtr)) Jim_SetVariable(interp, varObjPtr, Jim_DuplicateObj(interp, dictObjPtr)); return resObjPtr; } // Helper of Jim_GetVariable() to deal with dict-syntax variable names static __device__ Jim_Obj *JimDictSugarGet(Jim_Interp *interp, Jim_Obj *objPtr, int flags) { SetDictSubstFromAny(interp, objPtr); return JimDictExpandArrayVariable(interp, objPtr->internalRep.dictSubstValue.varNameObjPtr, objPtr->internalRep.dictSubstValue.indexObjPtr, flags); } // --------- $var(INDEX) substitution, using a specialized object ----------- __device__ void FreeDictSubstInternalRep(Jim_Interp *interp, Jim_Obj *objPtr) { Jim_DecrRefCount(interp, objPtr->internalRep.dictSubstValue.varNameObjPtr); Jim_DecrRefCount(interp, objPtr->internalRep.dictSubstValue.indexObjPtr); } __device__ void DupDictSubstInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr) { JIM_NOTUSED(interp); dupPtr->internalRep.dictSubstValue.varNameObjPtr = srcPtr->internalRep.dictSubstValue.varNameObjPtr; dupPtr->internalRep.dictSubstValue.indexObjPtr = srcPtr->internalRep.dictSubstValue.indexObjPtr; dupPtr->typePtr = &_dictSubstObjType; } // Note: The object *must* be in dict-sugar format static __device__ void SetDictSubstFromAny(Jim_Interp *interp, Jim_Obj *objPtr) { if (objPtr->typePtr != &_dictSubstObjType) { Jim_Obj *varObjPtr, *keyObjPtr; if (objPtr->typePtr == &_interpolatedObjType) { // An interpolated object in dict-sugar form varObjPtr = objPtr->internalRep.dictSubstValue.varNameObjPtr; keyObjPtr = objPtr->internalRep.dictSubstValue.indexObjPtr; Jim_IncrRefCount(varObjPtr); Jim_IncrRefCount(keyObjPtr); } else JimDictSugarParseVarKey(interp, objPtr, &varObjPtr, &keyObjPtr); Jim_FreeIntRep(interp, objPtr); objPtr->typePtr = &_dictSubstObjType; objPtr->internalRep.dictSubstValue.varNameObjPtr = varObjPtr; objPtr->internalRep.dictSubstValue.indexObjPtr = keyObjPtr; } } // This function is used to expand [dict get] sugar in the form of $var(INDEX). The function is mainly used by Jim_EvalObj() // to deal with tokens of type JIM_TT_DICTSUGAR. objPtr points to an object that is *guaranteed* to be in the form VARNAME(INDEX). // The 'index' part is [subst]ituted, and is used to lookup a key inside the [dict]ionary contained in variable VARNAME. 
static __device__ Jim_Obj *JimExpandDictSugar(Jim_Interp *interp, Jim_Obj *objPtr) { Jim_Obj *substKeyObjPtr = NULL; SetDictSubstFromAny(interp, objPtr); if (Jim_SubstObj(interp, objPtr->internalRep.dictSubstValue.indexObjPtr, &substKeyObjPtr, JIM_NONE) != JIM_OK) return NULL; Jim_IncrRefCount(substKeyObjPtr); Jim_Obj *resObjPtr = JimDictExpandArrayVariable(interp, objPtr->internalRep.dictSubstValue.varNameObjPtr, substKeyObjPtr, 0); Jim_DecrRefCount(interp, substKeyObjPtr); return resObjPtr; } static __device__ Jim_Obj *JimExpandExprSugar(Jim_Interp *interp, Jim_Obj *objPtr) { Jim_Obj *resultObjPtr; if (Jim_EvalExpression(interp, objPtr, &resultObjPtr) == JIM_OK) { // Note that the result has a ref count of 1, but we need a ref count of 0 resultObjPtr->refCount--; return resultObjPtr; } return NULL; } #pragma endregion // ----------------------------------------------------------------------------- // CallFrame // ----------------------------------------------------------------------------- #pragma region CallFrame static __device__ Jim_CallFrame *JimCreateCallFrame(Jim_Interp *interp, Jim_CallFrame *parent, Jim_Obj *nsObj) { Jim_CallFrame *cf; if (interp->freeFramesList) { cf = interp->freeFramesList; interp->freeFramesList = cf->next; cf->argv = NULL; cf->argc = 0; cf->procArgsObjPtr = NULL; cf->procBodyObjPtr = NULL; cf->next = NULL; cf->staticVars = NULL; cf->localCommands = NULL; cf->tailcallObj = NULL; cf->tailcallCmd = NULL; } else { cf = (Jim_CallFrame *)Jim_Alloc(sizeof(*cf)); memset(cf, 0, sizeof(*cf)); Jim_InitHashTable(&cf->vars, &JimVariablesHashTableType, interp); } cf->id = interp->callFrameEpoch++; cf->parent = parent; cf->level = (parent ? parent->level + 1 : 0); cf->nsObj = nsObj; Jim_IncrRefCount(nsObj); return cf; } static __device__ int JimDeleteLocalProcs(Jim_Interp *interp, Jim_Stack *localCommands) { // Delete any local procs if (localCommands) { Jim_Obj *cmdNameObj; while ((cmdNameObj = (Jim_Obj *)Jim_StackPop(localCommands)) != NULL) { Jim_HashTable *ht = &interp->commands; Jim_Obj *fqObjName; const char *fqname = JimQualifyName(interp, Jim_String(cmdNameObj), &fqObjName); Jim_HashEntry *he = Jim_FindHashEntry(ht, fqname); if (he) { Jim_Cmd *cmd = (Jim_Cmd *)Jim_GetHashEntryVal(he); if (cmd->prevCmd) { Jim_Cmd *prevCmd = cmd->prevCmd; cmd->prevCmd = NULL; // Delete the old command JimDecrCmdRefCount(interp, cmd); // And restore the original Jim_SetHashVal(ht, he, prevCmd); } else { Jim_DeleteHashEntry(ht, fqname); Jim_InterpIncrProcEpoch(interp); } } Jim_DecrRefCount(interp, cmdNameObj); JimFreeQualifiedName(interp, fqObjName); } Jim_FreeStack(localCommands); Jim_Free(localCommands); } return JIM_OK; } #define JIM_FCF_FULL 0 // Always free the vars hash table #define JIM_FCF_REUSE 1 // Reuse the vars hash table if possible static __device__ void JimFreeCallFrame(Jim_Interp *interp, Jim_CallFrame *cf, int action) { JimDeleteLocalProcs(interp, cf->localCommands); if (cf->procArgsObjPtr) Jim_DecrRefCount(interp, cf->procArgsObjPtr); if (cf->procBodyObjPtr) Jim_DecrRefCount(interp, cf->procBodyObjPtr); Jim_DecrRefCount(interp, cf->nsObj); if (action == JIM_FCF_FULL || cf->vars.size != JIM_HT_INITIAL_SIZE) Jim_FreeHashTable(&cf->vars); else { Jim_HashEntry **table = cf->vars.table; for (int i = 0; i < JIM_HT_INITIAL_SIZE; i++) { Jim_HashEntry *he = table[i]; while (he != NULL) { Jim_HashEntry *nextEntry = he->next; Jim_Var *varPtr = (Jim_Var *)Jim_GetHashEntryVal(he); Jim_DecrRefCount(interp, varPtr->objPtr); Jim_Free(Jim_GetHashEntryKey(he)); Jim_Free(varPtr); 
Jim_Free(he); table[i] = NULL; he = nextEntry; } } cf->vars.used = 0; } cf->next = interp->freeFramesList; interp->freeFramesList = cf; } #pragma endregion // ----------------------------------------------------------------------------- // References // ----------------------------------------------------------------------------- #pragma region References #ifdef JIM_REFERENCES // References HashTable Type. // Keys are unsigned long integers, dynamically allocated for now but in the future it's worth to cache this 4 bytes objects. Values are pointers to Jim_References. static __device__ void JimReferencesHTValDestructor(void *interp, void *val) { Jim_Reference *refPtr = (Jim_Reference *)val; Jim_DecrRefCount((Jim_Interp *)interp, refPtr->objPtr); if (refPtr->finalizerCmdNamePtr != NULL) Jim_DecrRefCount((Jim_Interp *)interp, refPtr->finalizerCmdNamePtr); Jim_Free(val); } static __device__ unsigned int JimReferencesHTHashFunction(const void *key) { // Only the least significant bits are used. const unsigned long *widePtr = (const unsigned long *)key; unsigned int intValue = (unsigned int)*widePtr; return Jim_IntHashFunction(intValue); } static __device__ void *JimReferencesHTKeyDup(void *privdata, const void *key) { JIM_NOTUSED(privdata); void *copy = Jim_Alloc(sizeof(unsigned long)); memcpy(copy, key, sizeof(unsigned long)); return copy; } static __device__ int JimReferencesHTKeyCompare(void *privdata, const void *key1, const void *key2) { JIM_NOTUSED(privdata); return memcmp(key1, key2, sizeof(unsigned long)) == 0; } static __device__ void JimReferencesHTKeyDestructor(void *privdata, void *key) { JIM_NOTUSED(privdata); Jim_Free(key); } __constant__ static const Jim_HashTableType JimReferencesHashTableType = { JimReferencesHTHashFunction, // hash function JimReferencesHTKeyDup, // key dup NULL, // val dup JimReferencesHTKeyCompare, // key compare JimReferencesHTKeyDestructor, // key destructor JimReferencesHTValDestructor // val destructor }; // ----------------------------------------------------------------------------- // Reference object type and References API // ----------------------------------------------------------------------------- // The string representation of references has two features in order to make the GC faster. The first is that every reference starts // with a non common character '<', in order to make the string matching faster. The second is that the reference string rep is 42 characters // in length, this means that it is not necessary to check any object with a string repr < 42, and usually there aren't many of these objects. #define JIM_REFERENCE_SPACE (35+JIM_REFERENCE_TAGLEN) static __device__ int JimFormatReference(char *buf, Jim_Reference *refPtr, unsigned long id) { const char *fmt = "<reference.<%s>.%020lu>"; sprintf(buf, fmt, refPtr->tag, id); return JIM_REFERENCE_SPACE; } static __device__ void UpdateStringOfReference(struct Jim_Obj *objPtr); __constant__ static const Jim_ObjType _referenceObjType = { "reference", NULL, NULL, UpdateStringOfReference, JIM_TYPE_REFERENCES, }; static __device__ void UpdateStringOfReference(struct Jim_Obj *objPtr) { char buf[JIM_REFERENCE_SPACE + 1]; JimFormatReference(buf, objPtr->internalRep.refValue.refPtr, objPtr->internalRep.refValue.id); JimSetStringBytes(objPtr, buf); } // returns true if 'c' is a valid reference tag character. i.e. 
inside the range [_a-zA-Z0-9] static __device__ int isrefchar(int c) { return (c == '_' || isalnum(c)); } static __device__ int SetReferenceFromAny(Jim_Interp *interp, Jim_Obj *objPtr) { // Get the string representation int len; const char *str = Jim_GetString(objPtr, &len); // Check if it looks like a reference if (len < JIM_REFERENCE_SPACE) goto badformat; // Trim spaces const char *start = str; const char *end = str + len - 1; while (*start == ' ') start++; while (*end == ' ' && end > start) end--; if (end - start + 1 != JIM_REFERENCE_SPACE) goto badformat; // <reference.<1234567>.%020> if (memcmp(start, "<reference.<", 12) != 0) goto badformat; if (start[12 + JIM_REFERENCE_TAGLEN] != '>' || end[0] != '>') goto badformat; // The tag can't contain chars other than a-zA-Z0-9 + '_'. for (int i = 0; i < JIM_REFERENCE_TAGLEN; i++) if (!isrefchar(start[12 + i])) goto badformat; // Extract info from the reference. char refId[21]; memcpy(refId, start + 14 + JIM_REFERENCE_TAGLEN, 20); refId[20] = '\0'; // Try to convert the ID into an unsigned long char *endptr; unsigned long value = strtoul(refId, &endptr, 10); if (JimCheckConversion(refId, endptr) != JIM_OK) goto badformat; // Check if the reference really exists! Jim_HashEntry *he = Jim_FindHashEntry(&interp->references, &value); if (he == NULL) { Jim_SetResultFormatted(interp, "invalid reference id \"%#s\"", objPtr); return JIM_ERROR; } Jim_Reference *refPtr = (Jim_Reference *)Jim_GetHashEntryVal(he); // Free the old internal repr and set the new one. Jim_FreeIntRep(interp, objPtr); objPtr->typePtr = &_referenceObjType; objPtr->internalRep.refValue.id = value; objPtr->internalRep.refValue.refPtr = refPtr; return JIM_OK; badformat: Jim_SetResultFormatted(interp, "expected reference but got \"%#s\"", objPtr); return JIM_ERROR; } // Returns a new reference pointing to objPtr, having cmdNamePtr as finalizer command (or NULL if there is no finalizer). The returned reference object has refcount = 0. __device__ Jim_Obj *Jim_NewReference(Jim_Interp *interp, Jim_Obj *objPtr, Jim_Obj *tagPtr, Jim_Obj *cmdNamePtr) { // Perform the Garbage Collection if needed. Jim_CollectIfNeeded(interp); struct Jim_Reference *refPtr = (struct Jim_Reference *)Jim_Alloc(sizeof(*refPtr)); refPtr->objPtr = objPtr; Jim_IncrRefCount(objPtr); refPtr->finalizerCmdNamePtr = cmdNamePtr; if (cmdNamePtr) Jim_IncrRefCount(cmdNamePtr); unsigned long id = interp->referenceNextId++; Jim_AddHashEntry(&interp->references, &id, refPtr); Jim_Obj *refObjPtr = Jim_NewObj(interp); refObjPtr->typePtr = &_referenceObjType; refObjPtr->bytes = NULL; refObjPtr->internalRep.refValue.id = id; refObjPtr->internalRep.refValue.refPtr = refPtr; interp->referenceNextId++; // Set the tag. Trimmed at JIM_REFERENCE_TAGLEN. 
Everything that does not pass the 'isrefchar' test is replaced with '_' int tagLen; const char *tag = Jim_GetString(tagPtr, &tagLen); if (tagLen > JIM_REFERENCE_TAGLEN) tagLen = JIM_REFERENCE_TAGLEN; for (int i = 0; i < JIM_REFERENCE_TAGLEN; i++) if (i < tagLen && isrefchar(tag[i])) refPtr->tag[i] = tag[i]; else refPtr->tag[i] = '_'; refPtr->tag[JIM_REFERENCE_TAGLEN] = '\0'; return refObjPtr; } __device__ Jim_Reference *Jim_GetReference(Jim_Interp *interp, Jim_Obj *objPtr) { if (objPtr->typePtr != &_referenceObjType && SetReferenceFromAny(interp, objPtr) == JIM_ERROR) return NULL; return objPtr->internalRep.refValue.refPtr; } __device__ int Jim_SetFinalizer(Jim_Interp *interp, Jim_Obj *objPtr, Jim_Obj *cmdNamePtr) { Jim_Reference *refPtr; if ((refPtr = Jim_GetReference(interp, objPtr)) == NULL) return JIM_ERROR; Jim_IncrRefCount(cmdNamePtr); if (refPtr->finalizerCmdNamePtr) Jim_DecrRefCount(interp, refPtr->finalizerCmdNamePtr); refPtr->finalizerCmdNamePtr = cmdNamePtr; return JIM_OK; } __device__ int Jim_GetFinalizer(Jim_Interp *interp, Jim_Obj *objPtr, Jim_Obj **cmdNamePtrPtr) { Jim_Reference *refPtr; if ((refPtr = Jim_GetReference(interp, objPtr)) == NULL) return JIM_ERROR; *cmdNamePtrPtr = refPtr->finalizerCmdNamePtr; return JIM_OK; } // ----------------------------------------------------------------------------- // References Garbage Collection // ----------------------------------------------------------------------------- // This the hash table type for the "MARK" phase of the GC __constant__ static const Jim_HashTableType JimRefMarkHashTableType = { JimReferencesHTHashFunction, // hash function JimReferencesHTKeyDup, // key dup NULL, // val dup JimReferencesHTKeyCompare, // key compare JimReferencesHTKeyDestructor, // key destructor NULL // val destructor }; // Performs the garbage collection __device__ int Jim_Collect(Jim_Interp *interp) { int collected = 0; #ifndef JIM_BOOTSTRAP // Avoid recursive calls if (interp->lastCollectId == -1) return 0; // Jim_Collect() already running. Return just now. interp->lastCollectId = -1; // Mark all the references found into the 'mark' hash table. The references are searched in every live object that is of a type that can contain references. Jim_HashTable marks; Jim_InitHashTable(&marks, &JimRefMarkHashTableType, NULL); Jim_Obj *objPtr = interp->liveList; while (objPtr) { if (objPtr->typePtr == NULL || objPtr->typePtr->flags & JIM_TYPE_REFERENCES) { // If the object is of type reference, to get the Id is simple... if (objPtr->typePtr == &_referenceObjType) { Jim_AddHashEntry(&marks, &objPtr->internalRep.refValue.id, NULL); #ifdef JIM_DEBUG_GC printf("MARK (reference): %d refcount: %d\n", (int)objPtr->internalRep.refValue.id, objPtr->refCount); #endif objPtr = objPtr->nextObjPtr; continue; } // Get the string repr of the object we want to scan for references. const char *str, *p; int len; p = str = Jim_GetString(objPtr, &len); // Skip objects too little to contain references. if (len < JIM_REFERENCE_SPACE) { objPtr = objPtr->nextObjPtr; continue; } // Extract references from the object string repr. while (1) { if ((p = strstr(p, "<reference.<")) == NULL) break; // Check if it's a valid reference. if (len - (p - str) < JIM_REFERENCE_SPACE) break; if (p[41] != '>' || p[19] != '>' || p[20] != '.') break; for (int i = 21; i <= 40; i++) if (!isdigit(p[i])) break; // Get the ID unsigned long id = strtoul(p + 21, NULL, 10); // Ok, a reference for the given ID was found. Mark it. 
Jim_AddHashEntry(&marks, &id, NULL); #ifdef JIM_DEBUG_GC printf("MARK: %d\n", (int)id); #endif p += JIM_REFERENCE_SPACE; } } objPtr = objPtr->nextObjPtr; } // Run the references hash table to destroy every reference that is not referenced outside (not present in the mark HT). Jim_HashEntry *he; Jim_HashTableIterator htiter; JimInitHashTableIterator(&interp->references, &htiter); while ((he = Jim_NextHashEntry(&htiter)) != NULL) { const unsigned long *refId = (const unsigned long *)he->key; // Check if in the mark phase we encountered this reference. if (Jim_FindHashEntry(&marks, refId) == NULL) { #ifdef JIM_DEBUG_GC printf("COLLECTING %d\n", (int)*refId); #endif collected++; // Drop the reference, but call the finalizer first if registered. Jim_Reference *refPtr = (Jim_Reference *)Jim_GetHashEntryVal(he); if (refPtr->finalizerCmdNamePtr) { char *refstr = (char *)Jim_Alloc(JIM_REFERENCE_SPACE + 1); Jim_Obj *objv[3], *oldResult; JimFormatReference(refstr, refPtr, *refId); objv[0] = refPtr->finalizerCmdNamePtr; objv[1] = Jim_NewStringObjNoAlloc(interp, refstr, JIM_REFERENCE_SPACE); objv[2] = refPtr->objPtr; // Drop the reference itself. Avoid the finaliser being freed here Jim_IncrRefCount(objv[0]); // Don't remove the reference from the hash table just yet since that will free refPtr, and hence refPtr->objPtr // Call the finalizer. Errors ignored. (should we use bgerror?) oldResult = interp->result; Jim_IncrRefCount(oldResult); Jim_EvalObjVector(interp, 3, objv); Jim_SetResult(interp, oldResult); Jim_DecrRefCount(interp, oldResult); Jim_DecrRefCount(interp, objv[0]); } Jim_DeleteHashEntry(&interp->references, refId); } } Jim_FreeHashTable(&marks); interp->lastCollectId = interp->referenceNextId; interp->lastCollectTime = time(NULL); #endif // JIM_BOOTSTRAP return collected; } #define JIM_COLLECT_ID_PERIOD 5000 #define JIM_COLLECT_TIME_PERIOD 300 __device__ void Jim_CollectIfNeeded(Jim_Interp *interp) { unsigned long elapsedId = interp->referenceNextId - interp->lastCollectId; int elapsedTime = (int)(time(NULL) - interp->lastCollectTime); if (elapsedId > JIM_COLLECT_ID_PERIOD || elapsedTime > JIM_COLLECT_TIME_PERIOD) Jim_Collect(interp); } #endif #pragma endregion __device__ int Jim_IsBigEndian(void) { union { unsigned short s; unsigned char c[2]; } uval = {0x0102}; return uval.c[0] == 1; } // ----------------------------------------------------------------------------- // Interpreter related functions // ----------------------------------------------------------------------------- #pragma region Interpreter related functions __device__ Jim_Interp *Jim_CreateInterp() { Jim_Interp *i = (Jim_Interp *)Jim_Alloc(sizeof(*i)); memset(i, 0, sizeof(*i)); i->maxCallFrameDepth = JIM_MAX_CALLFRAME_DEPTH; i->maxEvalDepth = JIM_MAX_EVAL_DEPTH; i->lastCollectTime = time(NULL); // Note that we can create objects only after the interpreter liveList and freeList pointers are initialized to NULL. 
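    // (The memset() above has already zeroed liveList and freeList, so creating the objects below is safe.)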
Jim_InitHashTable(&i->commands, &JimCommandsHashTableType, i); #ifdef JIM_REFERENCES Jim_InitHashTable(&i->references, &JimReferencesHashTableType, i); #endif Jim_InitHashTable(&i->assocData, &JimAssocDataHashTableType, i); Jim_InitHashTable(&i->packages, &JimPackageHashTableType, NULL); i->emptyObj = Jim_NewEmptyStringObj(i); i->trueObj = Jim_NewIntObj(i, 1); i->falseObj = Jim_NewIntObj(i, 0); i->framePtr = i->topFramePtr = JimCreateCallFrame(i, NULL, i->emptyObj); i->errorFileNameObj = i->emptyObj; i->result = i->emptyObj; i->stackTrace = Jim_NewListObj(i, NULL, 0); i->unknown = Jim_NewStringObj(i, "unknown", -1); i->errorProc = i->emptyObj; i->currentScriptObj = Jim_NewEmptyStringObj(i); i->nullScriptObj = Jim_NewEmptyStringObj(i); Jim_IncrRefCount(i->emptyObj); Jim_IncrRefCount(i->errorFileNameObj); Jim_IncrRefCount(i->result); Jim_IncrRefCount(i->stackTrace); Jim_IncrRefCount(i->unknown); Jim_IncrRefCount(i->currentScriptObj); Jim_IncrRefCount(i->nullScriptObj); Jim_IncrRefCount(i->errorProc); Jim_IncrRefCount(i->trueObj); Jim_IncrRefCount(i->falseObj); // Initialize key variables every interpreter should contain Jim_SetVariableStrWithStr(i, JIM_LIBPATH, TCL_LIBRARY); Jim_SetVariableStrWithStr(i, JIM_INTERACTIVE, "0"); Jim_SetVariableStrWithStr(i, "tcl_platform(os)", TCL_PLATFORM_OS); Jim_SetVariableStrWithStr(i, "tcl_platform(platform)", TCL_PLATFORM_PLATFORM); Jim_SetVariableStrWithStr(i, "tcl_platform(pathSeparator)", TCL_PLATFORM_PATH_SEPARATOR); Jim_SetVariableStrWithStr(i, "tcl_platform(byteOrder)", Jim_IsBigEndian() ? "bigEndian" : "littleEndian"); Jim_SetVariableStrWithStr(i, "tcl_platform(threaded)", "0"); Jim_SetVariableStr(i, "tcl_platform(pointerSize)", Jim_NewIntObj(i, sizeof(void *))); Jim_SetVariableStr(i, "tcl_platform(wordSize)", Jim_NewIntObj(i, sizeof(jim_wide))); return i; } __device__ void Jim_FreeInterp(Jim_Interp *i) { // Free the active call frames list - must be done before i->commands is destroyed Jim_CallFrame *cf, *cfx; for (cf = i->framePtr; cf; cf = cfx) { cfx = cf->parent; JimFreeCallFrame(i, cf, JIM_FCF_FULL); } Jim_DecrRefCount(i, i->emptyObj); Jim_DecrRefCount(i, i->trueObj); Jim_DecrRefCount(i, i->falseObj); Jim_DecrRefCount(i, i->result); Jim_DecrRefCount(i, i->stackTrace); Jim_DecrRefCount(i, i->errorProc); Jim_DecrRefCount(i, i->unknown); Jim_DecrRefCount(i, i->errorFileNameObj); Jim_DecrRefCount(i, i->currentScriptObj); Jim_DecrRefCount(i, i->nullScriptObj); Jim_FreeHashTable(&i->commands); #ifdef JIM_REFERENCES Jim_FreeHashTable(&i->references); #endif Jim_FreeHashTable(&i->packages); Jim_Free(i->prngState); Jim_FreeHashTable(&i->assocData); // Check that the live object list is empty, otherwise there is a memory leak. Jim_Obj *objPtr, *nextObjPtr; #ifdef JIM_MAINTAINER if (i->liveList != NULL) { objPtr = i->liveList; printf("\n-------------------------------------\n"); printf("Objects still in the free list:\n"); while (objPtr) { const char *type = (objPtr->typePtr ? objPtr->typePtr->name : "string"); if (objPtr->bytes && strlen(objPtr->bytes) > 20) printf("%p (%d) %-10s: '%.20s...'\n", (void *)objPtr, objPtr->refCount, type, objPtr->bytes); else printf("%p (%d) %-10s: '%s'\n", (void *)objPtr, objPtr->refCount, type, objPtr->bytes ? 
objPtr->bytes : "(null)"); if (objPtr->typePtr == &_sourceObjType) printf("FILE %s LINE %d\n", Jim_String(objPtr->internalRep.sourceValue.fileNameObj), objPtr->internalRep.sourceValue.lineNumber); objPtr = objPtr->nextObjPtr; } printf("-------------------------------------\n\n"); JimPanic(1, "Live list non empty freeing the interpreter! Leak?"); } #endif // Free all the freed objects. objPtr = i->freeList; while (objPtr) { nextObjPtr = objPtr->nextObjPtr; Jim_Free(objPtr); objPtr = nextObjPtr; } // Free the free call frames list for (cf = i->freeFramesList; cf; cf = cfx) { cfx = cf->next; if (cf->vars.table) Jim_FreeHashTable(&cf->vars); Jim_Free(cf); } // Free the interpreter structure. Jim_Free(i); } // Returns the call frame relative to the level represented by levelObjPtr. If levelObjPtr == NULL, the level is assumed to be '1'. // This function accepts the 'level' argument in the form of the commands [uplevel] and [upvar]. // Returns NULL on error. // Note: for a function accepting a relative integer as level suitable for implementation of [info level ?level?], see JimGetCallFrameByInteger() __device__ Jim_CallFrame *Jim_GetCallFrameByLevel(Jim_Interp *interp, Jim_Obj *levelObjPtr) { long level; const char *str; if (levelObjPtr) { str = Jim_String(levelObjPtr); if (str[0] == '#') { char *endptr; level = jim_strtol(str + 1, &endptr); if (str[1] == '\0' || endptr[0] != '\0') level = -1; } // Convert from a relative to an absolute level else level = (Jim_GetLong(interp, levelObjPtr, &level) != JIM_OK || level < 0 ? -1 : interp->framePtr->level - level); } else { str = "1"; // Needed to format the error message. level = interp->framePtr->level - 1; } if (level == 0) return interp->topFramePtr; // Lookup if (level > 0) for (Jim_CallFrame *framePtr = interp->framePtr; framePtr; framePtr = framePtr->parent) if (framePtr->level == level) return framePtr; Jim_SetResultFormatted(interp, "bad level \"%s\"", str); return NULL; } // Similar to Jim_GetCallFrameByLevel() but the level is specified as a relative integer like in the [info level ?level?] command. static __device__ Jim_CallFrame *JimGetCallFrameByInteger(Jim_Interp *interp, Jim_Obj *levelObjPtr) { long level; if (Jim_GetLong(interp, levelObjPtr, &level) == JIM_OK) { // Convert from a relative to an absolute level if (level <= 0) level = interp->framePtr->level + level; if (level == 0) return interp->topFramePtr; // Lookup for (Jim_CallFrame *framePtr = interp->framePtr; framePtr; framePtr = framePtr->parent) if (framePtr->level == level) return framePtr; } Jim_SetResultFormatted(interp, "bad level \"%#s\"", levelObjPtr); return NULL; } static __device__ void JimResetStackTrace(Jim_Interp *interp) { Jim_DecrRefCount(interp, interp->stackTrace); interp->stackTrace = Jim_NewListObj(interp, NULL, 0); Jim_IncrRefCount(interp->stackTrace); } static __device__ void JimSetStackTrace(Jim_Interp *interp, Jim_Obj *stackTraceObj) { // Increment reference first in case these are the same object Jim_IncrRefCount(stackTraceObj); Jim_DecrRefCount(interp, interp->stackTrace); interp->stackTrace = stackTraceObj; interp->errorFlag = 1; // This is a bit ugly. If the filename of the last entry of the stack trace is empty, the next stack level should be added. 
int len = Jim_ListLength(interp, interp->stackTrace); if (len >= 3) if (Jim_Length(Jim_ListGetIndex(interp, interp->stackTrace, len - 2)) == 0) interp->addStackTrace = 1; } static __device__ void JimAppendStackTrace(Jim_Interp *interp, const char *procname, Jim_Obj *fileNameObj, int linenr) { if (!strcmp(procname, "unknown")) procname = ""; if (!*procname && !Jim_Length(fileNameObj)) return; // No useful info here if (Jim_IsShared(interp->stackTrace)) { Jim_DecrRefCount(interp, interp->stackTrace); interp->stackTrace = Jim_DuplicateObj(interp, interp->stackTrace); Jim_IncrRefCount(interp->stackTrace); } // If we have no procname but the previous element did, merge with that frame if (!*procname && Jim_Length(fileNameObj)) { // Just a filename. Check the previous entry int len = Jim_ListLength(interp, interp->stackTrace); if (len >= 3) { Jim_Obj *objPtr = Jim_ListGetIndex(interp, interp->stackTrace, len - 3); if (Jim_Length(objPtr)) { // Yes, the previous level had procname objPtr = Jim_ListGetIndex(interp, interp->stackTrace, len - 2); if (Jim_Length(objPtr) == 0) { // But no filename, so merge the new info with that frame ListSetIndex(interp, interp->stackTrace, len - 2, fileNameObj, 0); ListSetIndex(interp, interp->stackTrace, len - 1, Jim_NewIntObj(interp, linenr), 0); return; } } } } Jim_ListAppendElement(interp, interp->stackTrace, Jim_NewStringObj(interp, procname, -1)); Jim_ListAppendElement(interp, interp->stackTrace, fileNameObj); Jim_ListAppendElement(interp, interp->stackTrace, Jim_NewIntObj(interp, linenr)); } __device__ int Jim_SetAssocData(Jim_Interp *interp, const char *key, Jim_InterpDeleteProc * delProc, void *data) { AssocDataValue *assocEntryPtr = (AssocDataValue *)Jim_Alloc(sizeof(AssocDataValue)); assocEntryPtr->delProc = delProc; assocEntryPtr->data = data; return Jim_AddHashEntry(&interp->assocData, key, assocEntryPtr); } __device__ void *Jim_GetAssocData(Jim_Interp *interp, const char *key) { Jim_HashEntry *entryPtr = Jim_FindHashEntry(&interp->assocData, key); if (entryPtr != NULL) { AssocDataValue *assocEntryPtr = (AssocDataValue *)Jim_GetHashEntryVal(entryPtr); return assocEntryPtr->data; } return NULL; } __device__ int Jim_DeleteAssocData(Jim_Interp *interp, const char *key) { return Jim_DeleteHashEntry(&interp->assocData, key); } __device__ int Jim_GetExitCode(Jim_Interp *interp) { return interp->exitCode; } #pragma endregion // ----------------------------------------------------------------------------- // Integer object // ----------------------------------------------------------------------------- #pragma region Integer object static __device__ void UpdateStringOfInt(struct Jim_Obj *objPtr); static __device__ int SetIntFromAny(Jim_Interp *interp, Jim_Obj *objPtr, int flags); __constant__ static const Jim_ObjType _intObjType = { "int", NULL, NULL, UpdateStringOfInt, JIM_TYPE_NONE, }; // A coerced double is closer to an int than a double. It is an int value temporarily masquerading as a double value. // i.e. it has the same string value as an int and Jim_GetWide() succeeds, but also Jim_GetDouble() returns the value directly. 
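// For example, the string "5" converted by SetDoubleFromAny() keeps its integer internal rep
// under this type, so a later Jim_GetWide() needs no re-parse and the exact string form is preserved.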
__constant__ static const Jim_ObjType _coercedDoubleObjType = { "coerced-double", NULL, NULL, UpdateStringOfInt, JIM_TYPE_NONE, }; static __device__ void UpdateStringOfInt(struct Jim_Obj *objPtr) { char buf[JIM_INTEGER_SPACE + 1]; jim_wide wideValue = JimWideValue(objPtr); int pos = 0; if (wideValue == 0) buf[pos++] = '0'; else { char tmp[JIM_INTEGER_SPACE]; int num = 0; int i; if (wideValue < 0) { buf[pos++] = '-'; i = wideValue % 10; // C89 is implementation defined as to whether (-106 % 10) is -6 or 4, whereas C99 is always -6. coverity[dead_error_line] tmp[num++] = (i > 0 ? (10 - i) : -i); wideValue /= -10; } while (wideValue) { tmp[num++] = wideValue % 10; wideValue /= 10; } for (i = 0; i < num; i++) buf[pos++] = '0' + tmp[num - i - 1]; } buf[pos] = 0; JimSetStringBytes(objPtr, buf); } static __device__ int SetIntFromAny(Jim_Interp *interp, Jim_Obj *objPtr, int flags) { if (objPtr->typePtr == &_coercedDoubleObjType) { // Simple switch objPtr->typePtr = &_intObjType; return JIM_OK; } // Get the string representation const char *str = Jim_String(objPtr); // Try to convert into a jim_wide jim_wide wideValue; if (Jim_StringToWide(str, &wideValue, 0) != JIM_OK) { if (flags & JIM_ERRMSG) Jim_SetResultFormatted(interp, "expected integer but got \"%#s\"", objPtr); return JIM_ERROR; } if ((wideValue == JIM_WIDE_MIN || wideValue == JIM_WIDE_MAX) && errno == ERANGE) { Jim_SetResultString(interp, "Integer value too big to be represented", -1); return JIM_ERROR; } // Free the old internal repr and set the new one. Jim_FreeIntRep(interp, objPtr); objPtr->typePtr = &_intObjType; objPtr->internalRep.wideValue = wideValue; return JIM_OK; } #ifdef JIM_OPTIMIZATION static __device__ int JimIsWide(Jim_Obj *objPtr) { return objPtr->typePtr == &_intObjType; } #endif __device__ int Jim_GetWide(Jim_Interp *interp, Jim_Obj *objPtr, jim_wide *widePtr) { if (objPtr->typePtr != &_intObjType && SetIntFromAny(interp, objPtr, JIM_ERRMSG) == JIM_ERROR) return JIM_ERROR; *widePtr = JimWideValue(objPtr); return JIM_OK; } // Get a wide but does not set an error if the format is bad. 
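// Illustrative sketch (an addition, not part of the original source; the helper name is
// hypothetical and 'interp' is assumed valid): Jim_GetWide() reports a malformed integer in the
// interp result, whereas JimGetWideNoErr() below fails without touching it.
static __device__ void JimExampleGetWideError(Jim_Interp *interp) {
    jim_wide w;
    int rc = Jim_GetWide(interp, Jim_NewStringObj(interp, "12abc", -1), &w);
    (void)rc; // rc == JIM_ERROR and the result holds: expected integer but got "12abc"
}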
static __device__ int JimGetWideNoErr(Jim_Interp *interp, Jim_Obj *objPtr, jim_wide * widePtr) { if (objPtr->typePtr != &_intObjType && SetIntFromAny(interp, objPtr, JIM_NONE) == JIM_ERROR) return JIM_ERROR; *widePtr = JimWideValue(objPtr); return JIM_OK; }
__device__ int Jim_GetLong(Jim_Interp *interp, Jim_Obj *objPtr, long *longPtr) { jim_wide wideValue; int retval = Jim_GetWide(interp, objPtr, &wideValue); if (retval == JIM_OK) { *longPtr = (long)wideValue; return JIM_OK; } return JIM_ERROR; }
__device__ Jim_Obj *Jim_NewIntObj(Jim_Interp *interp, jim_wide wideValue) { Jim_Obj *objPtr = Jim_NewObj(interp); objPtr->typePtr = &_intObjType; objPtr->bytes = NULL; objPtr->internalRep.wideValue = wideValue; return objPtr; }
#pragma endregion
// -----------------------------------------------------------------------------
// Double object
// -----------------------------------------------------------------------------
#pragma region Double object
#define JIM_DOUBLE_SPACE 30
static __device__ void UpdateStringOfDouble(struct Jim_Obj *objPtr);
static __device__ int SetDoubleFromAny(Jim_Interp *interp, Jim_Obj *objPtr);
__constant__ static const Jim_ObjType _doubleObjType = { "double", NULL, NULL, UpdateStringOfDouble, JIM_TYPE_NONE, };
#ifndef HAVE_ISNAN
#undef isnan
#define isnan(X) ((X) != (X))
#endif
#ifndef HAVE_ISINF
#undef isinf
#define isinf(X) (1.0 / (X) == 0.0)
#endif
static __device__ void UpdateStringOfDouble(struct Jim_Obj *objPtr) { double value = objPtr->internalRep.doubleValue; if (isnan(value)) { JimSetStringBytes(objPtr, "NaN"); return; } if (isinf(value)) { if (value < 0) JimSetStringBytes(objPtr, "-Inf"); else JimSetStringBytes(objPtr, "Inf"); return; } { char buf[JIM_DOUBLE_SPACE + 1]; int len = sprintf(buf, "%.12g", value);
    // Add a final ".0" if necessary
    int i; for (i = 0; i < len; i++) { if (buf[i] == '.' || buf[i] == 'e') {
#ifdef JIM_SPRINTF_DOUBLE_NEEDS_FIX
        // If 'buf' ends in e-0nn or e+0nn, remove the 0 after the + or - and reduce the length by 1
        char *e = (char *)strchr(buf, 'e'); if (e && (e[1] == '-' || e[1] == '+') && e[2] == '0') {
            // Move it up
            e += 2; memmove(e, e + 1, len - (e - buf)); }
#endif
        break; } } if (buf[i] == '\0') { buf[i++] = '.'; buf[i++] = '0'; buf[i] = '\0'; } JimSetStringBytes(objPtr, buf); } }
static __device__ int SetDoubleFromAny(Jim_Interp *interp, Jim_Obj *objPtr) { double doubleValue; jim_wide wideValue;
    // Preserve the string representation. Needed so we can convert back to int without loss
    const char *str = Jim_String(objPtr);
#ifdef HAVE_LONG_LONG
    // Assume a 53 bit mantissa
#define MIN_INT_IN_DOUBLE -(1LL << 53)
#define MAX_INT_IN_DOUBLE -(MIN_INT_IN_DOUBLE + 1)
    if (objPtr->typePtr == &_intObjType && JimWideValue(objPtr) >= MIN_INT_IN_DOUBLE && JimWideValue(objPtr) <= MAX_INT_IN_DOUBLE) {
        // Direct conversion to coerced double
        objPtr->typePtr = &_coercedDoubleObjType; return JIM_OK; } else
#endif
    if (Jim_StringToWide(str, &wideValue, 10) == JIM_OK) {
        // Managed to convert to an int, so we can use this as a coerced double
        Jim_FreeIntRep(interp, objPtr); objPtr->typePtr = &_coercedDoubleObjType; objPtr->internalRep.wideValue = wideValue; return JIM_OK; } else {
        // Try to convert into a double
        if (Jim_StringToDouble(str, &doubleValue) != JIM_OK) { Jim_SetResultFormatted(interp, "expected floating-point number but got \"%#s\"", objPtr); return JIM_ERROR; }
        // Free the old internal repr and set the new one.
        Jim_FreeIntRep(interp, objPtr); } objPtr->typePtr = &_doubleObjType; objPtr->internalRep.doubleValue = doubleValue; return JIM_OK; }
__device__ int Jim_GetDouble(Jim_Interp *interp, Jim_Obj *objPtr, double *doublePtr) { if (objPtr->typePtr == &_coercedDoubleObjType) { *doublePtr = (double)JimWideValue(objPtr); return JIM_OK; } if (objPtr->typePtr != &_doubleObjType && SetDoubleFromAny(interp, objPtr) == JIM_ERROR) return JIM_ERROR; *doublePtr = (objPtr->typePtr == &_coercedDoubleObjType ? (double)JimWideValue(objPtr) : objPtr->internalRep.doubleValue); return JIM_OK; }
__device__ Jim_Obj *Jim_NewDoubleObj(Jim_Interp *interp, double doubleValue) { Jim_Obj *objPtr = Jim_NewObj(interp); objPtr->typePtr = &_doubleObjType; objPtr->bytes = NULL; objPtr->internalRep.doubleValue = doubleValue; return objPtr; }
#pragma endregion
// -----------------------------------------------------------------------------
// List object
// -----------------------------------------------------------------------------
#pragma region List object
static __device__ void ListInsertElements(Jim_Obj *listPtr, int idx, int elemc, Jim_Obj *const *elemVec);
static __device__ void ListAppendElement(Jim_Obj *listPtr, Jim_Obj *objPtr);
static __device__ void FreeListInternalRep(Jim_Interp *interp, Jim_Obj *objPtr);
static __device__ void DupListInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr);
static __device__ void UpdateStringOfList(struct Jim_Obj *objPtr);
static __device__ int SetListFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr);
// Note that while the elements of the list may contain references, the list object itself can't. This basically means that the
// list object string representation as a whole can't contain references that are not present in the single elements.
__constant__ static const Jim_ObjType _listObjType = { "list", FreeListInternalRep, DupListInternalRep, UpdateStringOfList, JIM_TYPE_NONE, };
__device__ void FreeListInternalRep(Jim_Interp *interp, Jim_Obj *objPtr) { for (int i = 0; i < objPtr->internalRep.listValue.len; i++) Jim_DecrRefCount(interp, objPtr->internalRep.listValue.ele[i]); Jim_Free(objPtr->internalRep.listValue.ele); }
__device__ void DupListInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr) { JIM_NOTUSED(interp); dupPtr->internalRep.listValue.len = srcPtr->internalRep.listValue.len; dupPtr->internalRep.listValue.maxLen = srcPtr->internalRep.listValue.maxLen; dupPtr->internalRep.listValue.ele = (Jim_Obj **)Jim_Alloc(sizeof(Jim_Obj *) * srcPtr->internalRep.listValue.maxLen); memcpy(dupPtr->internalRep.listValue.ele, srcPtr->internalRep.listValue.ele, sizeof(Jim_Obj *) * srcPtr->internalRep.listValue.len); for (int i = 0; i < dupPtr->internalRep.listValue.len; i++) Jim_IncrRefCount(dupPtr->internalRep.listValue.ele[i]); dupPtr->typePtr = &_listObjType; }
// The following function checks if a given string can be encoded into a list element without any kind of quoting, surrounded by braces, or using escapes to quote.
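// Worked examples (an addition, not in the original): "abc" needs no quoting (SIMPLE); "a b",
// the empty string, or anything with unescaped specials but balanced braces is brace-quoted,
// e.g. {a b} or {} (BRACE); unbalanced braces or a trailing backslash force backslash escaping,
// e.g. a{b becomes a\{b (QUOTE).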
#define JIM_ELESTR_SIMPLE 0 #define JIM_ELESTR_BRACE 1 #define JIM_ELESTR_QUOTE 2 static __device__ unsigned char ListElementQuotingType(const char *s, int len) { int i, level, blevel, trySimple = 1; // Try with the SIMPLE case if (len == 0) return JIM_ELESTR_BRACE; if (s[0] == '"' || s[0] == '{') { trySimple = 0; goto testbrace; } for (i = 0; i < len; i++) switch (s[i]) { case ' ': case '$': case '"': case '[': case ']': case ';': case '\\': case '\r': case '\n': case '\t': case '\f': case '\v': trySimple = 0; case '{': case '}': goto testbrace; } return JIM_ELESTR_SIMPLE; testbrace: // Test if it's possible to do with braces if (s[len - 1] == '\\') return JIM_ELESTR_QUOTE; level = 0; blevel = 0; for (i = 0; i < len; i++) { switch (s[i]) { case '{': level++; break; case '}': level--; if (level < 0) return JIM_ELESTR_QUOTE; break; case '[': blevel++; break; case ']': blevel--; break; case '\\': if (s[i + 1] == '\n') return JIM_ELESTR_QUOTE; else if (s[i + 1] != '\0') i++; break; } } if (blevel < 0) return JIM_ELESTR_QUOTE; if (level == 0) { if (!trySimple) return JIM_ELESTR_BRACE; for (i = 0; i < len; i++) switch (s[i]) { case ' ': case '$': case '"': case '[': case ']': case ';': case '\\': case '\r': case '\n': case '\t': case '\f': case '\v': return JIM_ELESTR_BRACE; } return JIM_ELESTR_SIMPLE; } return JIM_ELESTR_QUOTE; } // Backslashes-escapes the null-terminated string 's' into the buffer at 'q' The buffer must be at least strlen(s) * 2 + 1 bytes long for the worst-case scenario. // Returns the length of the result. static __device__ int BackslashQuoteString(const char *s, int len, char *q) { char *p = q; while (len--) switch (*s) { case ' ': case '$': case '"': case '[': case ']': case '{': case '}': case ';': case '\\': *p++ = '\\'; *p++ = *s++; break; case '\n': *p++ = '\\'; *p++ = 'n'; s++; break; case '\r': *p++ = '\\'; *p++ = 'r'; s++; break; case '\t': *p++ = '\\'; *p++ = 't'; s++; break; case '\f': *p++ = '\\'; *p++ = 'f'; s++; break; case '\v': *p++ = '\\'; *p++ = 'v'; s++; break; default: *p++ = *s++; break; } *p = '\0'; return (int)(p - q); } static __device__ void JimMakeListStringRep(Jim_Obj *objPtr, Jim_Obj **objv, int objc) { #define STATIC_QUOTING_LEN 32 int i; const char *strRep; // Estimate the space needed unsigned char staticQuoting[STATIC_QUOTING_LEN]; unsigned char *quotingType = (objc > STATIC_QUOTING_LEN ? 
(unsigned char *)Jim_Alloc(objc) : staticQuoting); int bufLen = 0; for (i = 0; i < objc; i++) { int len; strRep = Jim_GetString(objv[i], &len); quotingType[i] = ListElementQuotingType(strRep, len); switch (quotingType[i]) { case JIM_ELESTR_SIMPLE: if (i != 0 || strRep[0] != '#') { bufLen += len; break; } // Special case '#' on first element needs braces quotingType[i] = JIM_ELESTR_BRACE; // fall through case JIM_ELESTR_BRACE: bufLen += len + 2; break; case JIM_ELESTR_QUOTE: bufLen += len * 2; break; } bufLen++; // elements separator } bufLen++; // Generate the string rep char *p = objPtr->bytes = (char *)Jim_Alloc(bufLen + 1); int realLength = 0; for (i = 0; i < objc; i++) { int len, qlen; strRep = Jim_GetString(objv[i], &len); switch (quotingType[i]) { case JIM_ELESTR_SIMPLE: memcpy(p, strRep, len); p += len; realLength += len; break; case JIM_ELESTR_BRACE: *p++ = '{'; memcpy(p, strRep, len); p += len; *p++ = '}'; realLength += len + 2; break; case JIM_ELESTR_QUOTE: if (i == 0 && strRep[0] == '#') { *p++ = '\\'; realLength++; } qlen = BackslashQuoteString(strRep, len, p); p += qlen; realLength += qlen; break; } // Add a separating space if (i + 1 != objc) { *p++ = ' '; realLength++; } } *p = '\0'; // nul term objPtr->length = realLength; if (quotingType != staticQuoting) Jim_Free(quotingType); } static __device__ void UpdateStringOfList(struct Jim_Obj *objPtr) { JimMakeListStringRep(objPtr, objPtr->internalRep.listValue.ele, objPtr->internalRep.listValue.len); } static __device__ int SetListFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr) { if (objPtr->typePtr == &_listObjType) return JIM_OK; // Optimise dict -> list for object with no string rep. Note that this may only save a little time, but it also preserves any source location of the dict elements which can be very useful if (Jim_IsDict(objPtr) && objPtr->bytes == NULL) { int len; Jim_Obj **listObjPtrPtr = JimDictPairs(objPtr, &len); for (int i = 0; i < len; i++) Jim_IncrRefCount(listObjPtrPtr[i]); // Now just switch the internal rep Jim_FreeIntRep(interp, objPtr); objPtr->typePtr = &_listObjType; objPtr->internalRep.listValue.len = len; objPtr->internalRep.listValue.maxLen = len; objPtr->internalRep.listValue.ele = listObjPtrPtr; return JIM_OK; } // Try to preserve information about filename / line number Jim_Obj *fileNameObj; int linenr; if (objPtr->typePtr == &_sourceObjType) { fileNameObj = objPtr->internalRep.sourceValue.fileNameObj; linenr = objPtr->internalRep.sourceValue.lineNumber; } else { fileNameObj = interp->emptyObj; linenr = 1; } Jim_IncrRefCount(fileNameObj); // Get the string representation int strLen; const char *str = Jim_GetString(objPtr, &strLen); // Free the old internal repr just now and initialize the new one just now. The string->list conversion can't fail. 
    Jim_FreeIntRep(interp, objPtr); objPtr->typePtr = &_listObjType; objPtr->internalRep.listValue.len = 0; objPtr->internalRep.listValue.maxLen = 0; objPtr->internalRep.listValue.ele = NULL;
    // Convert into a list
    if (strLen) { struct JimParserCtx parser; JimParserInit(&parser, str, strLen, linenr); while (!parser.eof) { JimParseList(&parser); if (parser.tt != JIM_TT_STR && parser.tt != JIM_TT_ESC) continue; Jim_Obj *elementPtr = JimParserGetTokenObj(interp, &parser); JimSetSourceInfo(interp, elementPtr, fileNameObj, parser.tline); ListAppendElement(objPtr, elementPtr); } } Jim_DecrRefCount(interp, fileNameObj); return JIM_OK; }
__device__ Jim_Obj *Jim_NewListObj(Jim_Interp *interp, Jim_Obj *const *elements, int len) { Jim_Obj *objPtr = Jim_NewObj(interp); objPtr->typePtr = &_listObjType; objPtr->bytes = NULL; objPtr->internalRep.listValue.ele = NULL; objPtr->internalRep.listValue.len = 0; objPtr->internalRep.listValue.maxLen = 0; if (len) ListInsertElements(objPtr, 0, len, elements); return objPtr; }
// Return a vector of Jim_Obj with the elements of a Jim list, and the length of the vector. Note that the user of this function should make
// sure that the list object can't shimmer while the vector returned is in use, since this vector is the one stored inside the internal representation
// of the list object. This function is not exported; extensions should always access the List object elements using Jim_ListIndex().
static __device__ void JimListGetElements(Jim_Interp *interp, Jim_Obj *listObj, int *listLen, Jim_Obj ***listVec) { *listLen = Jim_ListLength(interp, listObj); *listVec = listObj->internalRep.listValue.ele; }
// Sorting uses ints, but commands may return wide
static __device__ int JimSign(jim_wide w) { if (w == 0) return 0; else if (w < 0) return -1; return 1; }
// ListSortElements type values
struct lsort_info { jmp_buf jmpbuf; Jim_Obj *command; Jim_Interp *interp; enum { JIM_LSORT_ASCII, JIM_LSORT_NOCASE, JIM_LSORT_INTEGER, JIM_LSORT_REAL, JIM_LSORT_COMMAND } type; int order; int index; int indexed; int unique; int (*subfn)(Jim_Obj **, Jim_Obj **); };
static __device__ struct lsort_info *sort_info;
static __device__ int ListSortIndexHelper(Jim_Obj **lhsObj, Jim_Obj **rhsObj) { Jim_Obj *lObj, *rObj; if (Jim_ListIndex(sort_info->interp, *lhsObj, sort_info->index, &lObj, JIM_ERRMSG) != JIM_OK || Jim_ListIndex(sort_info->interp, *rhsObj, sort_info->index, &rObj, JIM_ERRMSG) != JIM_OK) longjmp(sort_info->jmpbuf, JIM_ERROR); return sort_info->subfn(&lObj, &rObj); }
// Sort the internal rep of a list.
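// Illustrative note (an addition, not in the original): every comparator below multiplies its
// raw comparison by sort_info->order, so an order of -1 turns ascending into descending; e.g.
// ListSortInteger on 3 vs 7 yields JimSign(3 - 7) * order: -1 ascending, +1 descending.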
static __device__ int ListSortString(Jim_Obj **lhsObj, Jim_Obj **rhsObj) { return Jim_StringCompareObj(sort_info->interp, *lhsObj, *rhsObj, 0) * sort_info->order; }
static __device__ int ListSortStringNoCase(Jim_Obj **lhsObj, Jim_Obj **rhsObj) { return Jim_StringCompareObj(sort_info->interp, *lhsObj, *rhsObj, 1) * sort_info->order; }
static __device__ int ListSortInteger(Jim_Obj **lhsObj, Jim_Obj **rhsObj) { jim_wide lhs = 0, rhs = 0; if (Jim_GetWide(sort_info->interp, *lhsObj, &lhs) != JIM_OK || Jim_GetWide(sort_info->interp, *rhsObj, &rhs) != JIM_OK) longjmp(sort_info->jmpbuf, JIM_ERROR); return JimSign(lhs - rhs) * sort_info->order; }
static __device__ int ListSortReal(Jim_Obj **lhsObj, Jim_Obj **rhsObj) { double lhs = 0, rhs = 0; if (Jim_GetDouble(sort_info->interp, *lhsObj, &lhs) != JIM_OK || Jim_GetDouble(sort_info->interp, *rhsObj, &rhs) != JIM_OK) longjmp(sort_info->jmpbuf, JIM_ERROR); if (lhs == rhs) return 0; if (lhs > rhs) return sort_info->order; return -sort_info->order; }
static __device__ int ListSortCommand(Jim_Obj **lhsObj, Jim_Obj **rhsObj) {
    // This must be a valid list
    Jim_Obj *compare_script = Jim_DuplicateObj(sort_info->interp, sort_info->command); Jim_ListAppendElement(sort_info->interp, compare_script, *lhsObj); Jim_ListAppendElement(sort_info->interp, compare_script, *rhsObj); int rc = Jim_EvalObj(sort_info->interp, compare_script); jim_wide ret = 0; if (rc != JIM_OK || Jim_GetWide(sort_info->interp, Jim_GetResult(sort_info->interp), &ret) != JIM_OK) longjmp(sort_info->jmpbuf, rc); return JimSign(ret) * sort_info->order; }
// Remove duplicate elements from the (sorted) list in-place, according to the comparison function, comp.
// Note that the last unique value is kept, not the first
static __device__ void ListRemoveDuplicates(Jim_Obj *listObjPtr, int (*comp)(Jim_Obj **lhs, Jim_Obj **rhs)) { int src; int dst = 0; Jim_Obj **ele = listObjPtr->internalRep.listValue.ele; for (src = 1; src < listObjPtr->internalRep.listValue.len; src++) {
        // Match, so replace the dest with the current source
        if (comp(&ele[dst], &ele[src]) == 0) Jim_DecrRefCount(sort_info->interp, ele[dst]);
        // No match, so keep the current source and move to the next destination
        else dst++;
        ele[dst] = ele[src]; }
    // Set the new length: dst indexes the last element kept
    listObjPtr->internalRep.listValue.len = dst + 1; }
// Sort a list *in place*. MUST be called with a non-shared list.
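// Illustrative note (an addition, not in the original): qsort() gives comparators no way to
// report failure, so ListSortInteger/ListSortReal/ListSortCommand longjmp() back to the
// setjmp() in ListSortElements below, which then returns the propagated Jim return code.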
static __device__ int ListSortElements(Jim_Interp *interp, Jim_Obj *listObjPtr, struct lsort_info *info) { JimPanic(Jim_IsShared(listObjPtr), "ListSortElements called with shared object"); SetListFromAny(interp, listObjPtr); // Allow lsort to be called reentrantly struct lsort_info *prev_info = sort_info; sort_info = info; Jim_Obj **vector = listObjPtr->internalRep.listValue.ele; int len = listObjPtr->internalRep.listValue.len; typedef int (qsort_comparator)(const void*,const void*); int (*fn)(Jim_Obj**,Jim_Obj**); switch (info->type) { case lsort_info::JIM_LSORT_ASCII: fn = ListSortString; break; case lsort_info::JIM_LSORT_NOCASE: fn = ListSortStringNoCase; break; case lsort_info::JIM_LSORT_INTEGER: fn = ListSortInteger; break; case lsort_info::JIM_LSORT_REAL: fn = ListSortReal; break; case lsort_info::JIM_LSORT_COMMAND: fn = ListSortCommand; break; default: fn = NULL; // avoid warning JimPanic(1, "ListSort called with invalid sort type"); } if (info->indexed) { // Need to interpose a "list index" function info->subfn = fn; fn = ListSortIndexHelper; } int rc; if ((rc = setjmp(info->jmpbuf)) == 0) { qsort(vector, len, sizeof(Jim_Obj *), (qsort_comparator *)fn); if (info->unique && len > 1) ListRemoveDuplicates(listObjPtr, fn); Jim_InvalidateStringRep(listObjPtr); } sort_info = prev_info; return rc; } // This is the low-level function to insert elements into a list. The higher-level Jim_ListInsertElements() performs shared object // check and invalidates the string repr. This version is used in the internals of the List Object and is not exported. // NOTE: this function can be called only against objects with internal type of List. // An insertion point (idx) of -1 means end-of-list. static __device__ void ListInsertElements(Jim_Obj *listPtr, int idx, int elemc, Jim_Obj *const *elemVec) { int currentLen = listPtr->internalRep.listValue.len; int requiredLen = currentLen + elemc; if (requiredLen > listPtr->internalRep.listValue.maxLen) { // Don't do allocations of under 4 pointers. if (requiredLen < 2) requiredLen = 4; else requiredLen *= 2; listPtr->internalRep.listValue.ele = (Jim_Obj **)Jim_Realloc(listPtr->internalRep.listValue.ele, sizeof(Jim_Obj *) * requiredLen); listPtr->internalRep.listValue.maxLen = requiredLen; } if (idx < 0) idx = currentLen; Jim_Obj **point = listPtr->internalRep.listValue.ele + idx; memmove(point + elemc, point, (currentLen - idx) * sizeof(Jim_Obj *)); for (int i = 0; i < elemc; ++i) { point[i] = elemVec[i]; Jim_IncrRefCount(point[i]); } listPtr->internalRep.listValue.len += elemc; } // Convenience call to ListInsertElements() to append a single element. static __device__ void ListAppendElement(Jim_Obj *listPtr, Jim_Obj *objPtr) { ListInsertElements(listPtr, -1, 1, &objPtr); } // Appends every element of appendListPtr into listPtr. Both have to be of the list type. 
// Convenience call to ListInsertElements()
static __device__ void ListAppendList(Jim_Obj *listPtr, Jim_Obj *appendListPtr) { ListInsertElements(listPtr, -1, appendListPtr->internalRep.listValue.len, appendListPtr->internalRep.listValue.ele); }
__device__ void Jim_ListAppendElement(Jim_Interp *interp, Jim_Obj *listPtr, Jim_Obj *objPtr) { JimPanic(Jim_IsShared(listPtr), "Jim_ListAppendElement called with shared object"); SetListFromAny(interp, listPtr); Jim_InvalidateStringRep(listPtr); ListAppendElement(listPtr, objPtr); }
__device__ void Jim_ListAppendList(Jim_Interp *interp, Jim_Obj *listPtr, Jim_Obj *appendListPtr) { JimPanic(Jim_IsShared(listPtr), "Jim_ListAppendList called with shared object"); SetListFromAny(interp, listPtr); SetListFromAny(interp, appendListPtr); Jim_InvalidateStringRep(listPtr); ListAppendList(listPtr, appendListPtr); }
__device__ int Jim_ListLength(Jim_Interp *interp, Jim_Obj *objPtr) { SetListFromAny(interp, objPtr); return objPtr->internalRep.listValue.len; }
__device__ void Jim_ListInsertElements(Jim_Interp *interp, Jim_Obj *listPtr, int idx, int objc, Jim_Obj *const *objVec) { JimPanic(Jim_IsShared(listPtr), "Jim_ListInsertElement called with shared object"); SetListFromAny(interp, listPtr); if (idx >= 0 && idx > listPtr->internalRep.listValue.len) idx = listPtr->internalRep.listValue.len; else if (idx < 0) idx = 0; Jim_InvalidateStringRep(listPtr); ListInsertElements(listPtr, idx, objc, objVec); }
__device__ Jim_Obj *Jim_ListGetIndex(Jim_Interp *interp, Jim_Obj *listPtr, int idx) { SetListFromAny(interp, listPtr); if ((idx >= 0 && idx >= listPtr->internalRep.listValue.len) || (idx < 0 && (-idx - 1) >= listPtr->internalRep.listValue.len)) return NULL; if (idx < 0) idx = listPtr->internalRep.listValue.len + idx; return listPtr->internalRep.listValue.ele[idx]; }
__device__ int Jim_ListIndex(Jim_Interp *interp, Jim_Obj *listPtr, int idx, Jim_Obj **objPtrPtr, int flags) { *objPtrPtr = Jim_ListGetIndex(interp, listPtr, idx); if (*objPtrPtr == NULL) { if (flags & JIM_ERRMSG) Jim_SetResultString(interp, "list index out of range", -1); return JIM_ERROR; } return JIM_OK; }
static __device__ int ListSetIndex(Jim_Interp *interp, Jim_Obj *listPtr, int idx, Jim_Obj *newObjPtr, int flags) { SetListFromAny(interp, listPtr); if ((idx >= 0 && idx >= listPtr->internalRep.listValue.len) || (idx < 0 && (-idx - 1) >= listPtr->internalRep.listValue.len)) { if (flags & JIM_ERRMSG) Jim_SetResultString(interp, "list index out of range", -1); return JIM_ERROR; } if (idx < 0) idx = listPtr->internalRep.listValue.len + idx; Jim_DecrRefCount(interp, listPtr->internalRep.listValue.ele[idx]); listPtr->internalRep.listValue.ele[idx] = newObjPtr; Jim_IncrRefCount(newObjPtr); return JIM_OK; }
// Modify the list stored in the variable named 'varNamePtr' setting the element specified by the 'indexc' indexes objects in 'indexv', with the new element 'newObjPtr'.
// (implements the [lset] command)
__device__ int Jim_ListSetIndex(Jim_Interp *interp, Jim_Obj *varNamePtr, Jim_Obj *const *indexv, int indexc, Jim_Obj *newObjPtr) { Jim_Obj *varObjPtr, *objPtr; varObjPtr = objPtr = Jim_GetVariable(interp, varNamePtr, JIM_ERRMSG | JIM_UNSHARED); if (objPtr == NULL) return JIM_ERROR; int shared; if ((shared = Jim_IsShared(objPtr))) varObjPtr = objPtr = Jim_DuplicateObj(interp, objPtr); int idx; for (int i = 0; i < indexc - 1; i++) { Jim_Obj *listObjPtr = objPtr; if (Jim_GetIndex(interp, indexv[i], &idx) != JIM_OK) goto err; if (Jim_ListIndex(interp, listObjPtr, idx, &objPtr, JIM_ERRMSG) != JIM_OK) goto err; if (Jim_IsShared(objPtr)) { objPtr = Jim_DuplicateObj(interp, objPtr); ListSetIndex(interp, listObjPtr, idx, objPtr, JIM_NONE); } Jim_InvalidateStringRep(listObjPtr); } if (Jim_GetIndex(interp, indexv[indexc - 1], &idx) != JIM_OK) goto err; if (ListSetIndex(interp, objPtr, idx, newObjPtr, JIM_ERRMSG) == JIM_ERROR) goto err; Jim_InvalidateStringRep(objPtr); Jim_InvalidateStringRep(varObjPtr); if (Jim_SetVariable(interp, varNamePtr, varObjPtr) != JIM_OK) goto err; Jim_SetResult(interp, varObjPtr); return JIM_OK; err: if (shared) Jim_FreeNewObj(interp, varObjPtr); return JIM_ERROR; }
__device__ Jim_Obj *Jim_ListJoin(Jim_Interp *interp, Jim_Obj *listObjPtr, const char *joinStr, int joinStrLen) { int listLen = Jim_ListLength(interp, listObjPtr); Jim_Obj *resObjPtr = Jim_NewEmptyStringObj(interp); for (int i = 0; i < listLen;) { Jim_AppendObj(interp, resObjPtr, Jim_ListGetIndex(interp, listObjPtr, i)); if (++i != listLen) Jim_AppendString(interp, resObjPtr, joinStr, joinStrLen); } return resObjPtr; }
__device__ Jim_Obj *Jim_ConcatObj(Jim_Interp *interp, int objc, Jim_Obj *const *objv) { int i;
    // If all the objects in objv are lists, it's possible to return a list as result, that's the concatenation of all the lists.
    for (i = 0; i < objc; i++) if (!Jim_IsList(objv[i])) break;
    if (i == objc) { Jim_Obj *objPtr = Jim_NewListObj(interp, NULL, 0); for (i = 0; i < objc; i++) ListAppendList(objPtr, objv[i]); return objPtr; } else {
        // Else... we have to glue strings together
        int len = 0, objLen;
        // Compute the length
        for (i = 0; i < objc; i++) len += Jim_Length(objv[i]); if (objc) len += objc - 1;
        // Create the string rep, and a string object holding it.
        char *bytes, *p; p = bytes = (char *)Jim_Alloc(len + 1); for (i = 0; i < objc; i++) { const char *s = Jim_GetString(objv[i], &objLen);
            // Remove leading space
            while (objLen && isspace(*s)) { s++; objLen--; len--; }
            // And trailing space & Handle trailing backslash-space case
            while (objLen && isspace(s[objLen - 1])) { if (objLen > 1 && s[objLen - 2] == '\\') break; objLen--; len--; }
            memcpy(p, s, objLen); p += objLen; if (i + 1 != objc) { if (objLen) *p++ = ' ';
                // Drop the space calculated for this element that is instead null.
                else len--; } }
        *p = '\0'; return Jim_NewStringObjNoAlloc(interp, bytes, len); } }
// Returns a list composed of the elements in the specified range. first and last are directly accepted as Jim_Objects and processed for the end?-index? case.
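// Illustrative sketch (an addition, not part of the original source; the helper name is
// hypothetical and 'interp' is assumed valid): end-relative range extraction through the
// public Jim_ListRange() implemented just below.
static __device__ void JimExampleListRange(Jim_Interp *interp) {
    // Build the list {a b c d e}
    Jim_Obj *l = Jim_NewListObj(interp, NULL, 0);
    const char *items[] = { "a", "b", "c", "d", "e" };
    for (int i = 0; i < 5; i++) Jim_ListAppendElement(interp, l, Jim_NewStringObj(interp, items[i], -1));
    // Equivalent of [lrange $l 1 end-1]: "end-1" resolves to index 3, so r holds {b c d}
    Jim_Obj *r = Jim_ListRange(interp, l, Jim_NewStringObj(interp, "1", -1), Jim_NewStringObj(interp, "end-1", -1));
    (void)r;
}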
__device__ Jim_Obj *Jim_ListRange(Jim_Interp *interp, Jim_Obj *listObjPtr, Jim_Obj *firstObjPtr, Jim_Obj *lastObjPtr) { int first, last; if (Jim_GetIndex(interp, firstObjPtr, &first) != JIM_OK || Jim_GetIndex(interp, lastObjPtr, &last) != JIM_OK) return NULL; int len = Jim_ListLength(interp, listObjPtr); // will convert into list
    first = JimRelToAbsIndex(len, first); last = JimRelToAbsIndex(len, last); int rangeLen; JimRelToAbsRange(len, &first, &last, &rangeLen); if (first == 0 && last == len) return listObjPtr; return Jim_NewListObj(interp, listObjPtr->internalRep.listValue.ele + first, rangeLen); }
#pragma endregion
// -----------------------------------------------------------------------------
// Dict object
// -----------------------------------------------------------------------------
#pragma region Dict object
static __device__ void FreeDictInternalRep(Jim_Interp *interp, Jim_Obj *objPtr);
static __device__ void DupDictInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr);
static __device__ void UpdateStringOfDict(struct Jim_Obj *objPtr);
static __device__ int SetDictFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr);
// Dict HashTable Type.
// Keys and Values are Jim objects.
static __device__ unsigned int JimObjectHTHashFunction(const void *key) { int len; const char *str = Jim_GetString((Jim_Obj *)key, &len); return Jim_GenHashFunction((const unsigned char *)str, len); }
static __device__ int JimObjectHTKeyCompare(void *privdata, const void *key1, const void *key2) { return Jim_StringEqObj((Jim_Obj *)key1, (Jim_Obj *)key2); }
static __device__ void *JimObjectHTKeyValDup(void *privdata, const void *val) { Jim_IncrRefCount((Jim_Obj *)val); return (void *)val; }
static __device__ void JimObjectHTKeyValDestructor(void *interp, void *val) { Jim_DecrRefCount((Jim_Interp *)interp, (Jim_Obj *)val); }
__constant__ static const Jim_HashTableType JimDictHashTableType = {
    JimObjectHTHashFunction,        // hash function
    JimObjectHTKeyValDup,           // key dup
    JimObjectHTKeyValDup,           // val dup
    JimObjectHTKeyCompare,          // key compare
    JimObjectHTKeyValDestructor,    // key destructor
    JimObjectHTKeyValDestructor     // val destructor
};
// Note that while the elements of the dict may contain references, the list object itself can't. This basically means that the
// dict object string representation as a whole can't contain references that are not present in the single elements.
__constant__ static const Jim_ObjType _dictObjType = { "dict", FreeDictInternalRep, DupDictInternalRep, UpdateStringOfDict, JIM_TYPE_NONE, };
__device__ void FreeDictInternalRep(Jim_Interp *interp, Jim_Obj *objPtr) { JIM_NOTUSED(interp); Jim_FreeHashTable((Jim_HashTable *)objPtr->internalRep.ptr); Jim_Free(objPtr->internalRep.ptr); }
__device__ void DupDictInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr) {
    // Create a new hash table
    Jim_HashTable *ht = (Jim_HashTable *)srcPtr->internalRep.ptr; Jim_HashTable *dupHt = (Jim_HashTable *)Jim_Alloc(sizeof(*dupHt)); Jim_InitHashTable(dupHt, &JimDictHashTableType, interp); if (ht->size != 0) Jim_ExpandHashTable(dupHt, ht->size);
    // Copy every element from the source to the dup hash table
    Jim_HashTableIterator htiter; Jim_HashEntry *he; JimInitHashTableIterator(ht, &htiter); while ((he = Jim_NextHashEntry(&htiter)) != NULL) Jim_AddHashEntry(dupHt, he->key, he->u.val);
    dupPtr->internalRep.ptr = dupHt; dupPtr->typePtr = &_dictObjType; }
static __device__ Jim_Obj **JimDictPairs(Jim_Obj *dictPtr, int *len) { Jim_HashTable *ht = (Jim_HashTable *)dictPtr->internalRep.ptr;
    // Turn the hash table into a flat vector of Jim_Objects.
    Jim_Obj **objv = (Jim_Obj **)Jim_Alloc((ht->used * 2) * sizeof(Jim_Obj *)); Jim_HashTableIterator htiter; Jim_HashEntry *he; JimInitHashTableIterator(ht, &htiter); int i = 0; while ((he = Jim_NextHashEntry(&htiter)) != NULL) { objv[i++] = (Jim_Obj *)Jim_GetHashEntryKey(he); objv[i++] = (Jim_Obj *)Jim_GetHashEntryVal(he); } *len = i; return objv; }
static __device__ void UpdateStringOfDict(struct Jim_Obj *objPtr) {
    // Turn the hash table into a flat vector of Jim_Objects.
    int len; Jim_Obj **objv = JimDictPairs(objPtr, &len);
    // And now generate the string rep as a list
    JimMakeListStringRep(objPtr, objv, len); Jim_Free(objv); }
static __device__ int SetDictFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr) { if (objPtr->typePtr == &_dictObjType) return JIM_OK;
    // A shared list, so get the string representation now to avoid changing the order in case of fast conversion to dict.
    if (Jim_IsList(objPtr) && Jim_IsShared(objPtr)) Jim_String(objPtr);
    // For simplicity, convert a non-list object to a list and then to a dict
    int listlen = Jim_ListLength(interp, objPtr); if (listlen % 2) { Jim_SetResultString(interp, "missing value to go with key", -1); return JIM_ERROR; } else {
        // Converting from a list to a dict can't fail
        Jim_HashTable *ht = (Jim_HashTable *)Jim_Alloc(sizeof(*ht)); Jim_InitHashTable(ht, &JimDictHashTableType, interp); for (int i = 0; i < listlen; i += 2) { Jim_Obj *keyObjPtr = Jim_ListGetIndex(interp, objPtr, i); Jim_Obj *valObjPtr = Jim_ListGetIndex(interp, objPtr, i + 1); Jim_ReplaceHashEntry(ht, keyObjPtr, valObjPtr); } Jim_FreeIntRep(interp, objPtr); objPtr->typePtr = &_dictObjType; objPtr->internalRep.ptr = ht; return JIM_OK; } }
// Dict object API
// Add an element to a dict. objPtr must be of the "dict" type. The higher-level exported function is Jim_DictAddElement().
// If an element with the specified key already exists, the value associated is replaced with the new one.
// if valueObjPtr == NULL, the key is instead removed if it exists.
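// Illustrative sketch (an addition, not part of the original source; the helper name is
// hypothetical and 'interp' is assumed valid): the public wrapper Jim_DictAddElement()
// defined below inserts, replaces, or (with a NULL value) removes a key.
static __device__ void JimExampleDictOps(Jim_Interp *interp) {
    Jim_Obj *d = Jim_NewDictObj(interp, NULL, 0);
    Jim_Obj *k = Jim_NewStringObj(interp, "color", -1);
    Jim_DictAddElement(interp, d, k, Jim_NewStringObj(interp, "red", -1)); // d == {color red}
    Jim_DictAddElement(interp, d, k, NULL);                                // key removed again
}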
static __device__ int DictAddElement(Jim_Interp *interp, Jim_Obj *objPtr, Jim_Obj *keyObjPtr, Jim_Obj *valueObjPtr) { Jim_HashTable *ht = (Jim_HashTable *)objPtr->internalRep.ptr; if (valueObjPtr == NULL) // unset return Jim_DeleteHashEntry(ht, keyObjPtr); Jim_ReplaceHashEntry(ht, keyObjPtr, valueObjPtr); return JIM_OK; } // Add an element, higher-level interface for DictAddElement(). // If valueObjPtr == NULL, the key is removed if it exists. __device__ int Jim_DictAddElement(Jim_Interp *interp, Jim_Obj *objPtr, Jim_Obj *keyObjPtr, Jim_Obj *valueObjPtr) { JimPanic(Jim_IsShared(objPtr), "Jim_DictAddElement called with shared object"); if (SetDictFromAny(interp, objPtr) != JIM_OK) return JIM_ERROR; Jim_InvalidateStringRep(objPtr); return DictAddElement(interp, objPtr, keyObjPtr, valueObjPtr); } __device__ Jim_Obj *Jim_NewDictObj(Jim_Interp *interp, Jim_Obj *const *elements, int len) { JimPanic(len % 2, "Jim_NewDictObj() 'len' argument must be even"); Jim_Obj *objPtr = Jim_NewObj(interp); objPtr->typePtr = &_dictObjType; objPtr->bytes = NULL; objPtr->internalRep.ptr = Jim_Alloc(sizeof(Jim_HashTable)); Jim_InitHashTable((Jim_HashTable *)objPtr->internalRep.ptr, &JimDictHashTableType, interp); for (int i = 0; i < len; i += 2) DictAddElement(interp, objPtr, elements[i], elements[i + 1]); return objPtr; } // Return the value associated to the specified dict key. Returns JIM_OK if OK, JIM_ERROR if entry not found or -1 if can't create dict value // Sets *objPtrPtr to non-NULL only upon success. __device__ int Jim_DictKey(Jim_Interp *interp, Jim_Obj *dictPtr, Jim_Obj *keyPtr, Jim_Obj **objPtrPtr, int flags) { if (SetDictFromAny(interp, dictPtr) != JIM_OK) return -1; Jim_HashTable *ht = (Jim_HashTable *)dictPtr->internalRep.ptr; Jim_HashEntry *he; if ((he = Jim_FindHashEntry(ht, keyPtr)) == NULL) { if (flags & JIM_ERRMSG) Jim_SetResultFormatted(interp, "key \"%#s\" not known in dictionary", keyPtr); return JIM_ERROR; } *objPtrPtr = (Jim_Obj *)he->u.val; return JIM_OK; } // Return an allocated array of key/value pairs for the dictionary. Stores the length in *len __device__ int Jim_DictPairs(Jim_Interp *interp, Jim_Obj *dictPtr, Jim_Obj ***objPtrPtr, int *len) { if (SetDictFromAny(interp, dictPtr) != JIM_OK) return JIM_ERROR; *objPtrPtr = JimDictPairs(dictPtr, len); return JIM_OK; } // Return the value associated to the specified dict keys __device__ int Jim_DictKeysVector(Jim_Interp *interp, Jim_Obj *dictPtr, Jim_Obj *const *keyv, int keyc, Jim_Obj **objPtrPtr, int flags) { if (keyc == 0) { *objPtrPtr = dictPtr; return JIM_OK; } for (int i = 0; i < keyc; i++) { Jim_Obj *objPtr; int rc = Jim_DictKey(interp, dictPtr, keyv[i], &objPtr, flags); if (rc != JIM_OK) return rc; dictPtr = objPtr; } *objPtrPtr = dictPtr; return JIM_OK; } // Modify the dict stored into the variable named 'varNamePtr' setting the element specified by the 'keyc' keys objects in 'keyv', with the new value of the element 'newObjPtr'. // If newObjPtr == NULL the operation is to remove the given key from the dictionary. // If flags & JIM_ERRMSG, then failure to remove the key is considered an error and JIM_ERROR is returned. Otherwise it is ignored and JIM_OK is returned. 
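// Worked example (an addition, not in the original): with keyv == {a b} and newObjPtr == 5,
// the loop below walks (or creates) the intermediate dict under key a, then stores 5 under
// key b -- the Tcl-level [dict set var a b 5]; with newObjPtr == NULL the innermost key is
// removed instead.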
__device__ int Jim_SetDictKeysVector(Jim_Interp *interp, Jim_Obj *varNamePtr, Jim_Obj *const *keyv, int keyc, Jim_Obj *newObjPtr, int flags) { Jim_Obj *varObjPtr, *objPtr; varObjPtr = objPtr = Jim_GetVariable(interp, varNamePtr, flags); if (objPtr == NULL) { // Cannot remove a key from non existing var if (newObjPtr == NULL && (flags & JIM_MUSTEXIST)) return JIM_ERROR; varObjPtr = objPtr = Jim_NewDictObj(interp, NULL, 0); if (Jim_SetVariable(interp, varNamePtr, objPtr) != JIM_OK) { Jim_FreeNewObj(interp, varObjPtr); return JIM_ERROR; } } int shared; if ((shared = Jim_IsShared(objPtr))) varObjPtr = objPtr = Jim_DuplicateObj(interp, objPtr); Jim_Obj *dictObjPtr; for (int i = 0; i < keyc; i++) { dictObjPtr = objPtr; // Check if it's a valid dictionary if (SetDictFromAny(interp, dictObjPtr) != JIM_OK) goto err; if (i == keyc - 1) { // Last key: Note that error on unset with missing last key is OK if (Jim_DictAddElement(interp, objPtr, keyv[keyc - 1], newObjPtr) != JIM_OK) if (newObjPtr || (flags & JIM_MUSTEXIST)) goto err; break; } // Check if the given key exists Jim_InvalidateStringRep(dictObjPtr); if (Jim_DictKey(interp, dictObjPtr, keyv[i], &objPtr, newObjPtr ? JIM_NONE : JIM_ERRMSG) == JIM_OK) { // This key exists at the current level. Make sure it's not shared! if (Jim_IsShared(objPtr)) { objPtr = Jim_DuplicateObj(interp, objPtr); DictAddElement(interp, dictObjPtr, keyv[i], objPtr); } } else { // Key not found. If it's an [unset] operation this is an error. Only the last key may not exist. if (newObjPtr == NULL) goto err; // Otherwise set an empty dictionary as key's value objPtr = Jim_NewDictObj(interp, NULL, 0); DictAddElement(interp, dictObjPtr, keyv[i], objPtr); } } // XXX: Is this necessary? Jim_InvalidateStringRep(objPtr); Jim_InvalidateStringRep(varObjPtr); if (Jim_SetVariable(interp, varNamePtr, varObjPtr) != JIM_OK) goto err; Jim_SetResult(interp, varObjPtr); return JIM_OK; err: if (shared) Jim_FreeNewObj(interp, varObjPtr); return JIM_ERROR; } #pragma endregion // ----------------------------------------------------------------------------- // Index object // ----------------------------------------------------------------------------- #pragma region Index object static __device__ void UpdateStringOfIndex(struct Jim_Obj *objPtr); static __device__ int SetIndexFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr); __constant__ static const Jim_ObjType _indexObjType = { "index", NULL, NULL, UpdateStringOfIndex, JIM_TYPE_NONE, }; static __device__ void UpdateStringOfIndex(struct Jim_Obj *objPtr) { if (objPtr->internalRep.intValue == -1) JimSetStringBytes(objPtr, "end"); else { char buf[JIM_INTEGER_SPACE + 1]; if (objPtr->internalRep.intValue >= 0) sprintf(buf, "%d", objPtr->internalRep.intValue); // Must be <= -2 else sprintf(buf, "end%d", objPtr->internalRep.intValue + 1); JimSetStringBytes(objPtr, buf); } } static __device__ int SetIndexFromAny(Jim_Interp *interp, Jim_Obj *objPtr) { // Get the string representation const char *str = Jim_String(objPtr); // Try to convert into an index int idx, end = 0; char *endptr; if (!strncmp(str, "end", 3)) { end = 1; str += 3; idx = 0; } else { idx = jim_strtol(str, &endptr); if (endptr == str) goto badindex; str = endptr; } // Now str may include or +<num> or -<num> if (*str == '+' || *str == '-') { int sign = (*str == '+' ? 
    1 : -1); idx += sign * jim_strtol(++str, &endptr); if (str == endptr || *endptr) goto badindex; str = endptr; }
    // The only thing left should be spaces
    while (isspace(*str)) { str++; }
    if (*str) goto badindex;
    // end-1 is represented as -2
    if (end) idx = (idx > 0 ? INT_MAX : idx - 1); else if (idx < 0) idx = -INT_MAX;
    // Free the old internal repr and set the new one
    Jim_FreeIntRep(interp, objPtr); objPtr->typePtr = &_indexObjType; objPtr->internalRep.intValue = idx; return JIM_OK;
badindex: Jim_SetResultFormatted(interp, "bad index \"%#s\": must be integer?[+-]integer? or end?[+-]integer?", objPtr); return JIM_ERROR; }
__device__ int Jim_GetIndex(Jim_Interp *interp, Jim_Obj *objPtr, int *indexPtr) {
    // Avoid shimmering if the object is an integer
    if (objPtr->typePtr == &_intObjType) { jim_wide val = JimWideValue(objPtr); if (val < 0) *indexPtr = -INT_MAX; else if (val > INT_MAX) *indexPtr = INT_MAX; else *indexPtr = (int)val; return JIM_OK; }
    if (objPtr->typePtr != &_indexObjType && SetIndexFromAny(interp, objPtr) == JIM_ERROR) return JIM_ERROR; *indexPtr = objPtr->internalRep.intValue; return JIM_OK; }
#pragma endregion
// -----------------------------------------------------------------------------
// Return Code Object
// -----------------------------------------------------------------------------
#pragma region Return Code Object
// NOTE: These must be kept in the same order as JIM_OK, JIM_ERROR, ...
__constant__ static const char * const jimReturnCodes[] = { "ok", "error", "return", "break", "continue", "signal", "exit", "eval", NULL };
#define jimReturnCodesSize (sizeof(jimReturnCodes)/sizeof(*jimReturnCodes))
__constant__ static const Jim_ObjType _returnCodeObjType = { "return-code", NULL, NULL, NULL, JIM_TYPE_NONE, };
// Converts a (standard) return code to a string. Returns "?" for non-standard return codes.
__device__ const char *Jim_ReturnCode(int code) { return (code < 0 || code >= (int)jimReturnCodesSize ? "?"
    : jimReturnCodes[code]); }
static __device__ int SetReturnCodeFromAny(Jim_Interp *interp, Jim_Obj *objPtr) {
    // Try to convert into an integer
    int returnCode; jim_wide wideValue; if (JimGetWideNoErr(interp, objPtr, &wideValue) != JIM_ERROR) returnCode = (int)wideValue; else if (Jim_GetEnum(interp, objPtr, jimReturnCodes, &returnCode, NULL, JIM_NONE) != JIM_OK) { Jim_SetResultFormatted(interp, "expected return code but got \"%#s\"", objPtr); return JIM_ERROR; }
    // Free the old internal repr and set the new one
    Jim_FreeIntRep(interp, objPtr); objPtr->typePtr = &_returnCodeObjType; objPtr->internalRep.intValue = returnCode; return JIM_OK; }
__device__ int Jim_GetReturnCode(Jim_Interp *interp, Jim_Obj *objPtr, int *intPtr) { if (objPtr->typePtr != &_returnCodeObjType && SetReturnCodeFromAny(interp, objPtr) == JIM_ERROR) return JIM_ERROR; *intPtr = objPtr->internalRep.intValue; return JIM_OK; }
#pragma endregion
// -----------------------------------------------------------------------------
// Expression Parsing
// -----------------------------------------------------------------------------
#pragma region Expression Parsing
static __device__ int JimParseExprOperator(struct JimParserCtx *pc);
static __device__ int JimParseExprNumber(struct JimParserCtx *pc);
static __device__ int JimParseExprIrrational(struct JimParserCtx *pc);
// Expr's stack machine operator opcodes
// Binary operators (numbers)
enum {
    // Continues on from the JIM_TT_ space
    // Operations
    JIM_EXPROP_MUL = JIM_TT_EXPR_OP, // 20
    JIM_EXPROP_DIV, JIM_EXPROP_MOD, JIM_EXPROP_SUB, JIM_EXPROP_ADD, JIM_EXPROP_LSHIFT, JIM_EXPROP_RSHIFT, JIM_EXPROP_ROTL, JIM_EXPROP_ROTR, JIM_EXPROP_LT, JIM_EXPROP_GT, JIM_EXPROP_LTE, JIM_EXPROP_GTE, JIM_EXPROP_NUMEQ, JIM_EXPROP_NUMNE,
    JIM_EXPROP_BITAND, // 35
    JIM_EXPROP_BITXOR, JIM_EXPROP_BITOR,
    // Note must keep these together
    JIM_EXPROP_LOGICAND, // 38
    JIM_EXPROP_LOGICAND_LEFT, JIM_EXPROP_LOGICAND_RIGHT,
    // and these
    JIM_EXPROP_LOGICOR, // 41
    JIM_EXPROP_LOGICOR_LEFT, JIM_EXPROP_LOGICOR_RIGHT,
    // and these
    // Ternary operators
    JIM_EXPROP_TERNARY, // 44
    JIM_EXPROP_TERNARY_LEFT, JIM_EXPROP_TERNARY_RIGHT,
    // and these
    JIM_EXPROP_COLON, // 47
    JIM_EXPROP_COLON_LEFT, JIM_EXPROP_COLON_RIGHT,
    JIM_EXPROP_POW, // 50
    // Binary operators (strings)
    JIM_EXPROP_STREQ, // 51
    JIM_EXPROP_STRNE, JIM_EXPROP_STRIN, JIM_EXPROP_STRNI,
    // Unary operators (numbers)
    JIM_EXPROP_NOT, // 55
    JIM_EXPROP_BITNOT, JIM_EXPROP_UNARYMINUS, JIM_EXPROP_UNARYPLUS,
    // Functions
    JIM_EXPROP_FUNC_FIRST, // 59
    JIM_EXPROP_FUNC_INT = JIM_EXPROP_FUNC_FIRST, JIM_EXPROP_FUNC_WIDE, JIM_EXPROP_FUNC_ABS, JIM_EXPROP_FUNC_DOUBLE, JIM_EXPROP_FUNC_ROUND, JIM_EXPROP_FUNC_RAND, JIM_EXPROP_FUNC_SRAND,
    // math functions from libm
    JIM_EXPROP_FUNC_SIN, // 65
    JIM_EXPROP_FUNC_COS, JIM_EXPROP_FUNC_TAN, JIM_EXPROP_FUNC_ASIN, JIM_EXPROP_FUNC_ACOS, JIM_EXPROP_FUNC_ATAN, JIM_EXPROP_FUNC_SINH, JIM_EXPROP_FUNC_COSH, JIM_EXPROP_FUNC_TANH, JIM_EXPROP_FUNC_CEIL, JIM_EXPROP_FUNC_FLOOR, JIM_EXPROP_FUNC_EXP, JIM_EXPROP_FUNC_LOG, JIM_EXPROP_FUNC_LOG10, JIM_EXPROP_FUNC_SQRT, JIM_EXPROP_FUNC_POW, };
struct JimExprState { Jim_Obj **stack; int stacklen; int opcode; int skip; };
// Operators table
typedef struct Jim_ExprOperator { const char *name; int (*funcop)(Jim_Interp*interp,struct JimExprState*e); unsigned char precedence; unsigned char arity; unsigned char lazy; unsigned char namelen; } Jim_ExprOperator;
static __device__ void ExprPush(struct JimExprState *e, Jim_Obj *obj) { Jim_IncrRefCount(obj); e->stack[e->stacklen++] = obj; }
static __device__ Jim_Obj *ExprPop(struct
JimExprState *e) { return e->stack[--e->stacklen]; } static __device__ int JimExprOpNumUnary(Jim_Interp *interp, struct JimExprState *e) { int intresult = 1; int rc = JIM_OK; Jim_Obj *A = ExprPop(e); double dA, dC = 0; jim_wide wA, wC = 0; if ((A->typePtr != &_doubleObjType || A->bytes) && JimGetWideNoErr(interp, A, &wA) == JIM_OK) switch (e->opcode) { case JIM_EXPROP_FUNC_INT: case JIM_EXPROP_FUNC_WIDE: case JIM_EXPROP_FUNC_ROUND: case JIM_EXPROP_UNARYPLUS: wC = wA; break; case JIM_EXPROP_FUNC_DOUBLE: dC = (double)wA; intresult = 0; break; case JIM_EXPROP_FUNC_ABS: wC = (wA >= 0 ? wA : -wA); break; case JIM_EXPROP_UNARYMINUS: wC = -wA; break; case JIM_EXPROP_NOT: wC = !wA; break; default: abort(); } else if ((rc = Jim_GetDouble(interp, A, &dA)) == JIM_OK) switch (e->opcode) { case JIM_EXPROP_FUNC_INT: case JIM_EXPROP_FUNC_WIDE: wC = (long long)dA; break; case JIM_EXPROP_FUNC_ROUND: wC = (long long)(dA < 0 ? (dA - 0.5) : (dA + 0.5)); break; case JIM_EXPROP_FUNC_DOUBLE: case JIM_EXPROP_UNARYPLUS: dC = dA; intresult = 0; break; case JIM_EXPROP_FUNC_ABS: dC = (dA >= 0 ? dA : -dA); intresult = 0; break; case JIM_EXPROP_UNARYMINUS: dC = -dA; intresult = 0; break; case JIM_EXPROP_NOT: wC = !dA; break; default: abort(); } if (rc == JIM_OK) if (intresult) ExprPush(e, Jim_NewIntObj(interp, wC)); else ExprPush(e, Jim_NewDoubleObj(interp, dC)); Jim_DecrRefCount(interp, A); return rc; } static __device__ double JimRandDouble(Jim_Interp *interp) { unsigned long x; JimRandomBytes(interp, &x, sizeof(x)); return (double)x / (unsigned long)~0; } static __device__ int JimExprOpIntUnary(Jim_Interp *interp, struct JimExprState *e) { Jim_Obj *A = ExprPop(e); jim_wide wA; int rc = Jim_GetWide(interp, A, &wA); if (rc == JIM_OK) switch (e->opcode) { case JIM_EXPROP_BITNOT: ExprPush(e, Jim_NewIntObj(interp, ~wA)); break; case JIM_EXPROP_FUNC_SRAND: JimPrngSeed(interp, (unsigned char *)&wA, sizeof(wA)); ExprPush(e, Jim_NewDoubleObj(interp, JimRandDouble(interp))); break; default: abort(); } Jim_DecrRefCount(interp, A); return rc; } static __device__ int JimExprOpNone(Jim_Interp *interp, struct JimExprState *e) { JimPanic(e->opcode != JIM_EXPROP_FUNC_RAND, "JimExprOpNone only support rand()"); ExprPush(e, Jim_NewDoubleObj(interp, JimRandDouble(interp))); return JIM_OK; } #ifdef JIM_MATH_FUNCTIONS static __device__ int JimExprOpDoubleUnary(Jim_Interp *interp, struct JimExprState *e) { Jim_Obj *A = ExprPop(e); double dA, dC; int rc = Jim_GetDouble(interp, A, &dA); if (rc == JIM_OK) { switch (e->opcode) { case JIM_EXPROP_FUNC_SIN: dC = sin(dA); break; case JIM_EXPROP_FUNC_COS: dC = cos(dA); break; case JIM_EXPROP_FUNC_TAN: dC = tan(dA); break; case JIM_EXPROP_FUNC_ASIN: dC = asin(dA); break; case JIM_EXPROP_FUNC_ACOS: dC = acos(dA); break; case JIM_EXPROP_FUNC_ATAN: dC = atan(dA); break; case JIM_EXPROP_FUNC_SINH: dC = sinh(dA); break; case JIM_EXPROP_FUNC_COSH: dC = cosh(dA); break; case JIM_EXPROP_FUNC_TANH: dC = tanh(dA); break; case JIM_EXPROP_FUNC_CEIL: dC = ceil(dA); break; case JIM_EXPROP_FUNC_FLOOR: dC = floor(dA); break; case JIM_EXPROP_FUNC_EXP: dC = exp(dA); break; case JIM_EXPROP_FUNC_LOG: dC = log(dA); break; case JIM_EXPROP_FUNC_LOG10: dC = log10(dA); break; case JIM_EXPROP_FUNC_SQRT: dC = sqrt(dA); break; default: abort(); } ExprPush(e, Jim_NewDoubleObj(interp, dC)); } Jim_DecrRefCount(interp, A); return rc; } #endif // A binary operation on two ints static __device__ int JimExprOpIntBin(Jim_Interp *interp, struct JimExprState *e) { Jim_Obj *B = ExprPop(e); Jim_Obj *A = ExprPop(e); jim_wide wA, wB; int 
rc = JIM_ERROR; if (Jim_GetWide(interp, A, &wA) == JIM_OK && Jim_GetWide(interp, B, &wB) == JIM_OK) { jim_wide wC; rc = JIM_OK; switch (e->opcode) { case JIM_EXPROP_LSHIFT: wC = wA << wB; break; case JIM_EXPROP_RSHIFT: wC = wA >> wB; break; case JIM_EXPROP_BITAND: wC = wA & wB; break; case JIM_EXPROP_BITXOR: wC = wA ^ wB; break; case JIM_EXPROP_BITOR: wC = wA | wB; break; case JIM_EXPROP_MOD: if (wB == 0) { wC = 0; Jim_SetResultString(interp, "Division by zero", -1); rc = JIM_ERROR; } else { // From Tcl 8.x // This code is tricky: C doesn't guarantee much about the quotient or remainder, but Tcl does. // The remainder always has the same sign as the divisor and a smaller absolute value. int negative = 0; if (wB < 0) { wB = -wB; wA = -wA; negative = 1; } wC = wA % wB; if (wC < 0) wC += wB; if (negative) wC = -wC; } break; case JIM_EXPROP_ROTL: case JIM_EXPROP_ROTR: { // uint32_t would be better. But not everyone has inttypes.h? unsigned long uA = (unsigned long)wA; unsigned long uB = (unsigned long)wB; const unsigned int S = sizeof(unsigned long) * 8; // Shift left by the word size or more is undefined uB %= S; if (e->opcode == JIM_EXPROP_ROTR) uB = S - uB; wC = (unsigned long)(uA << uB) | (uA >> (S - uB)); break; } default: abort(); } ExprPush(e, Jim_NewIntObj(interp, wC)); } Jim_DecrRefCount(interp, A); Jim_DecrRefCount(interp, B); return rc; } // A binary operation on two ints or two doubles (or two strings for some ops) static __device__ int JimExprOpBin(Jim_Interp *interp, struct JimExprState *e) { int intresult = 1; int rc = JIM_OK; double dA, dB, dC = 0; jim_wide wA, wB, wC = 0; Jim_Obj *B = ExprPop(e); Jim_Obj *A = ExprPop(e); // Both are ints if ((A->typePtr != &_doubleObjType || A->bytes) && (B->typePtr != &_doubleObjType || B->bytes) && JimGetWideNoErr(interp, A, &wA) == JIM_OK && JimGetWideNoErr(interp, B, &wB) == JIM_OK) switch (e->opcode) { case JIM_EXPROP_POW: case JIM_EXPROP_FUNC_POW: wC = JimPowWide(wA, wB); break; case JIM_EXPROP_ADD: wC = wA + wB; break; case JIM_EXPROP_SUB: wC = wA - wB; break; case JIM_EXPROP_MUL: wC = wA * wB; break; case JIM_EXPROP_DIV: if (wB == 0) { Jim_SetResultString(interp, "Division by zero", -1); rc = JIM_ERROR; } else { // From Tcl 8.x // This code is tricky: C doesn't guarantee much about the quotient or remainder, but Tcl does. // The remainder always has the same sign as the divisor and a smaller absolute value. if (wB < 0) { wB = -wB; wA = -wA; } wC = wA / wB; if (wA % wB < 0) wC--; } break; case JIM_EXPROP_LT: wC = wA < wB; break; case JIM_EXPROP_GT: wC = wA > wB; break; case JIM_EXPROP_LTE: wC = wA <= wB; break; case JIM_EXPROP_GTE: wC = wA >= wB; break; case JIM_EXPROP_NUMEQ: wC = wA == wB; break; case JIM_EXPROP_NUMNE: wC = wA != wB; break; default: abort(); } else if (Jim_GetDouble(interp, A, &dA) == JIM_OK && Jim_GetDouble(interp, B, &dB) == JIM_OK) { intresult = 0; switch (e->opcode) { case JIM_EXPROP_POW: case JIM_EXPROP_FUNC_POW: #ifdef JIM_MATH_FUNCTIONS dC = pow(dA, dB); #else Jim_SetResultString(interp, "unsupported", -1); rc = JIM_ERROR; #endif break; case JIM_EXPROP_ADD: dC = dA + dB; break; case JIM_EXPROP_SUB: dC = dA - dB; break; case JIM_EXPROP_MUL: dC = dA * dB; break; case JIM_EXPROP_DIV: if (dB == 0) { #ifdef INFINITY dC = dA < 0 ? -INFINITY : INFINITY; #else dC = (dA < 0 ? 
-1.0 : 1.0) * strtod("Inf", NULL); #endif } else dC = dA / dB; break; case JIM_EXPROP_LT: wC = dA < dB; intresult = 1; break; case JIM_EXPROP_GT: wC = dA > dB; intresult = 1; break; case JIM_EXPROP_LTE: wC = dA <= dB; intresult = 1; break; case JIM_EXPROP_GTE: wC = dA >= dB; intresult = 1; break; case JIM_EXPROP_NUMEQ: wC = dA == dB; intresult = 1; break; case JIM_EXPROP_NUMNE: wC = dA != dB; intresult = 1; break; default: abort(); } } else { // Handle the string case // XXX: Could optimise the eq/ne case by checking lengths int i = Jim_StringCompareObj(interp, A, B, 0); switch (e->opcode) { case JIM_EXPROP_LT: wC = i < 0; break; case JIM_EXPROP_GT: wC = i > 0; break; case JIM_EXPROP_LTE: wC = i <= 0; break; case JIM_EXPROP_GTE: wC = i >= 0; break; case JIM_EXPROP_NUMEQ: wC = i == 0; break; case JIM_EXPROP_NUMNE: wC = i != 0; break; default: rc = JIM_ERROR; break; } } if (rc == JIM_OK) if (intresult) ExprPush(e, Jim_NewIntObj(interp, wC)); else ExprPush(e, Jim_NewDoubleObj(interp, dC)); Jim_DecrRefCount(interp, A); Jim_DecrRefCount(interp, B); return rc; } static __device__ int JimSearchList(Jim_Interp *interp, Jim_Obj *listObjPtr, Jim_Obj *valObj) { int listlen = Jim_ListLength(interp, listObjPtr); for (int i = 0; i < listlen; i++) if (Jim_StringEqObj(Jim_ListGetIndex(interp, listObjPtr, i), valObj)) return 1; return 0; } static __device__ int JimExprOpStrBin(Jim_Interp *interp, struct JimExprState *e) { Jim_Obj *B = ExprPop(e); Jim_Obj *A = ExprPop(e); jim_wide wC; switch (e->opcode) { case JIM_EXPROP_STREQ: case JIM_EXPROP_STRNE: wC = Jim_StringEqObj(A, B); if (e->opcode == JIM_EXPROP_STRNE) wC = !wC; break; case JIM_EXPROP_STRIN: wC = JimSearchList(interp, B, A); break; case JIM_EXPROP_STRNI: wC = !JimSearchList(interp, B, A); break; default: abort(); } ExprPush(e, Jim_NewIntObj(interp, wC)); Jim_DecrRefCount(interp, A); Jim_DecrRefCount(interp, B); return JIM_OK; } static __device__ int ExprBool(Jim_Interp *interp, Jim_Obj *obj) { long l; double d; if (Jim_GetLong(interp, obj, &l) == JIM_OK) return l != 0; if (Jim_GetDouble(interp, obj, &d) == JIM_OK) return d != 0; return -1; } static __device__ int JimExprOpAndLeft(Jim_Interp *interp, struct JimExprState *e) { Jim_Obj *skip = ExprPop(e); Jim_Obj *A = ExprPop(e); int rc = JIM_OK; switch (ExprBool(interp, A)) { case 0: // false, so skip RHS opcodes with a 0 result e->skip = (int)JimWideValue(skip); ExprPush(e, Jim_NewIntObj(interp, 0)); break; case 1: break; // true so continue case -1: rc = JIM_ERROR; break; // Invalid } Jim_DecrRefCount(interp, A); Jim_DecrRefCount(interp, skip); return rc; } static __device__ int JimExprOpOrLeft(Jim_Interp *interp, struct JimExprState *e) { Jim_Obj *skip = ExprPop(e); Jim_Obj *A = ExprPop(e); int rc = JIM_OK; switch (ExprBool(interp, A)) { case 0: break; // false, so do nothing case 1: // true so skip RHS opcodes with a 1 result e->skip = (int)JimWideValue(skip); ExprPush(e, Jim_NewIntObj(interp, 1)); break; case -1: rc = JIM_ERROR; break; // Invalid } Jim_DecrRefCount(interp, A); Jim_DecrRefCount(interp, skip); return rc; } static __device__ int JimExprOpAndOrRight(Jim_Interp *interp, struct JimExprState *e) { Jim_Obj *A = ExprPop(e); int rc = JIM_OK; switch (ExprBool(interp, A)) { case 0: ExprPush(e, Jim_NewIntObj(interp, 0)); break; case 1: ExprPush(e, Jim_NewIntObj(interp, 1)); break; case -1: rc = JIM_ERROR; break; // Invalid } Jim_DecrRefCount(interp, A); return rc; } static __device__ int JimExprOpTernaryLeft(Jim_Interp *interp, struct JimExprState *e) { Jim_Obj *skip = ExprPop(e); Jim_Obj 
*A = ExprPop(e); int rc = JIM_OK; // Repush A ExprPush(e, A); switch (ExprBool(interp, A)) { case 0: // false, skip RHS opcodes e->skip = (int)JimWideValue(skip); // Push a dummy value ExprPush(e, Jim_NewIntObj(interp, 0)); break; case 1: break; // true so do nothing case -1: rc = JIM_ERROR; break; // Invalid } Jim_DecrRefCount(interp, A); Jim_DecrRefCount(interp, skip); return rc; } static __device__ int JimExprOpColonLeft(Jim_Interp *interp, struct JimExprState *e) { Jim_Obj *skip = ExprPop(e); Jim_Obj *B = ExprPop(e); Jim_Obj *A = ExprPop(e); // No need to check for A as non-boolean if (ExprBool(interp, A)) { // true, so skip RHS opcodes e->skip = (int)JimWideValue(skip); // Repush B as the answer ExprPush(e, B); } Jim_DecrRefCount(interp, skip); Jim_DecrRefCount(interp, A); Jim_DecrRefCount(interp, B); return JIM_OK; } static __device__ int JimExprOpNull(Jim_Interp *interp, struct JimExprState *e) { return JIM_OK; } enum { LAZY_NONE, LAZY_OP, LAZY_LEFT, LAZY_RIGHT }; // name - precedence - arity - opcode // This array *must* be kept in sync with the JIM_EXPROP enum. // The following macros pre-compute the string length at compile time. #define OPRINIT(N, P, A, F) {N, F, P, A, LAZY_NONE, sizeof(N) - 1} #define OPRINIT_LAZY(N, P, A, F, L) {N, F, P, A, L, sizeof(N) - 1} __constant__ static const struct Jim_ExprOperator Jim_ExprOperators[] = { OPRINIT("*", 110, 2, JimExprOpBin), OPRINIT("/", 110, 2, JimExprOpBin), OPRINIT("%", 110, 2, JimExprOpIntBin), // OPRINIT("-", 100, 2, JimExprOpBin), OPRINIT("+", 100, 2, JimExprOpBin), // OPRINIT("<<", 90, 2, JimExprOpIntBin), OPRINIT(">>", 90, 2, JimExprOpIntBin), // OPRINIT("<<<", 90, 2, JimExprOpIntBin), OPRINIT(">>>", 90, 2, JimExprOpIntBin), // OPRINIT("<", 80, 2, JimExprOpBin), OPRINIT(">", 80, 2, JimExprOpBin), OPRINIT("<=", 80, 2, JimExprOpBin), OPRINIT(">=", 80, 2, JimExprOpBin), // OPRINIT("==", 70, 2, JimExprOpBin), OPRINIT("!=", 70, 2, JimExprOpBin), // OPRINIT("&", 50, 2, JimExprOpIntBin), OPRINIT("^", 49, 2, JimExprOpIntBin), OPRINIT("|", 48, 2, JimExprOpIntBin), // OPRINIT_LAZY("&&", 10, 2, NULL, LAZY_OP), OPRINIT_LAZY(NULL, 10, 2, JimExprOpAndLeft, LAZY_LEFT), OPRINIT_LAZY(NULL, 10, 2, JimExprOpAndOrRight, LAZY_RIGHT), // OPRINIT_LAZY("||", 9, 2, NULL, LAZY_OP), OPRINIT_LAZY(NULL, 9, 2, JimExprOpOrLeft, LAZY_LEFT), OPRINIT_LAZY(NULL, 9, 2, JimExprOpAndOrRight, LAZY_RIGHT), // OPRINIT_LAZY("?", 5, 2, JimExprOpNull, LAZY_OP), OPRINIT_LAZY(NULL, 5, 2, JimExprOpTernaryLeft, LAZY_LEFT), OPRINIT_LAZY(NULL, 5, 2, JimExprOpNull, LAZY_RIGHT), // OPRINIT_LAZY(":", 5, 2, JimExprOpNull, LAZY_OP), OPRINIT_LAZY(NULL, 5, 2, JimExprOpColonLeft, LAZY_LEFT), OPRINIT_LAZY(NULL, 5, 2, JimExprOpNull, LAZY_RIGHT), // OPRINIT("**", 250, 2, JimExprOpBin), // OPRINIT("eq", 60, 2, JimExprOpStrBin), OPRINIT("ne", 60, 2, JimExprOpStrBin), // OPRINIT("in", 55, 2, JimExprOpStrBin), OPRINIT("ni", 55, 2, JimExprOpStrBin), // OPRINIT("!", 150, 1, JimExprOpNumUnary), OPRINIT("~", 150, 1, JimExprOpIntUnary), OPRINIT(NULL, 150, 1, JimExprOpNumUnary), OPRINIT(NULL, 150, 1, JimExprOpNumUnary), // OPRINIT("int", 200, 1, JimExprOpNumUnary), OPRINIT("wide", 200, 1, JimExprOpNumUnary), OPRINIT("abs", 200, 1, JimExprOpNumUnary), OPRINIT("double", 200, 1, JimExprOpNumUnary), OPRINIT("round", 200, 1, JimExprOpNumUnary), OPRINIT("rand", 200, 0, JimExprOpNone), OPRINIT("srand", 200, 1, JimExprOpIntUnary), // #ifdef JIM_MATH_FUNCTIONS OPRINIT("sin", 200, 1, JimExprOpDoubleUnary), OPRINIT("cos", 200, 1, JimExprOpDoubleUnary), OPRINIT("tan", 200, 1, JimExprOpDoubleUnary), 
OPRINIT("asin", 200, 1, JimExprOpDoubleUnary), OPRINIT("acos", 200, 1, JimExprOpDoubleUnary), OPRINIT("atan", 200, 1, JimExprOpDoubleUnary), OPRINIT("sinh", 200, 1, JimExprOpDoubleUnary), OPRINIT("cosh", 200, 1, JimExprOpDoubleUnary), OPRINIT("tanh", 200, 1, JimExprOpDoubleUnary), OPRINIT("ceil", 200, 1, JimExprOpDoubleUnary), OPRINIT("floor", 200, 1, JimExprOpDoubleUnary), OPRINIT("exp", 200, 1, JimExprOpDoubleUnary), OPRINIT("log", 200, 1, JimExprOpDoubleUnary), OPRINIT("log10", 200, 1, JimExprOpDoubleUnary), OPRINIT("sqrt", 200, 1, JimExprOpDoubleUnary), OPRINIT("pow", 200, 2, JimExprOpBin), #endif }; #undef OPRINIT #undef OPRINIT_LAZY #define JIM_EXPR_OPERATORS_NUM (sizeof(Jim_ExprOperators)/sizeof(struct Jim_ExprOperator)) static __device__ int JimParseExpression(struct JimParserCtx *pc) { // Discard spaces and quoted newline while (isspace(*pc->p) || (*(pc->p) == '\\' && *(pc->p + 1) == '\n')) { if (*pc->p == '\n') pc->linenr++; pc->p++; pc->len--; } // Common case pc->tline = pc->linenr; pc->tstart = pc->p; if (pc->len == 0) { pc->tend = pc->p; pc->tt = JIM_TT_EOL; pc->eof = 1; return JIM_OK; } switch (*(pc->p)) { case '(': pc->tt = JIM_TT_SUBEXPR_START; goto singlechar; case ')': pc->tt = JIM_TT_SUBEXPR_END; goto singlechar; case ',': pc->tt = JIM_TT_SUBEXPR_COMMA; singlechar: pc->tend = pc->p; pc->p++; pc->len--; break; case '[': return JimParseCmd(pc); case '$': if (JimParseVar(pc) == JIM_ERROR) return JimParseExprOperator(pc); else return (pc->tt == JIM_TT_EXPRSUGAR ? JIM_ERROR : JIM_OK); // Don't allow expr sugar in expressions case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case '.': return JimParseExprNumber(pc); case '"': return JimParseQuote(pc); case '{': return JimParseBrace(pc); case 'N': case 'I': case 'n': case 'i': if (JimParseExprIrrational(pc) == JIM_ERROR) return JimParseExprOperator(pc); break; default: return JimParseExprOperator(pc); } return JIM_OK; } static __device__ int JimParseExprNumber(struct JimParserCtx *pc) { // Assume an integer for now pc->tt = JIM_TT_EXPR_INT; jim_strtoull(pc->p, (char **)&pc->p); // Tried as an integer, but perhaps it parses as a double if (strchr("eENnIi.", *pc->p) || pc->p == pc->tstart) { // Some stupid compilers insist they are cleverer that we are. Even a (void) cast doesn't prevent this warning! 
static __device__ int JimParseExprNumber(struct JimParserCtx *pc)
{
    // Assume an integer for now
    pc->tt = JIM_TT_EXPR_INT;
    jim_strtoull(pc->p, (char **)&pc->p);
    // Tried as an integer, but perhaps it parses as a double
    if (strchr("eENnIi.", *pc->p) || pc->p == pc->tstart) {
        // Some stupid compilers insist they are cleverer than we are. Even a (void) cast doesn't prevent this warning!
        char *end;
        if (strtod(pc->tstart, &end)) { } // nothing
        if (end == pc->tstart) return JIM_ERROR;
        if (end > pc->p) {
            // Yes, double captured more chars
            pc->tt = JIM_TT_EXPR_DOUBLE;
            pc->p = end;
        }
    }
    pc->tend = pc->p - 1;
    pc->len -= (int)(pc->p - pc->tstart);
    return JIM_OK;
}

static __device__ int JimParseExprIrrational(struct JimParserCtx *pc)
{
    const char *irrationals[] = { "NaN", "nan", "NAN", "Inf", "inf", "INF", NULL };
    for (int i = 0; irrationals[i]; i++) {
        const char *irr = irrationals[i];
        if (!strncmp(irr, pc->p, 3)) {
            pc->p += 3;
            pc->len -= 3;
            pc->tend = pc->p - 1;
            pc->tt = JIM_TT_EXPR_DOUBLE;
            return JIM_OK;
        }
    }
    return JIM_ERROR;
}

static __device__ int JimParseExprOperator(struct JimParserCtx *pc)
{
    // Try to get the longest match
    int bestIdx = -1, bestLen = 0;
    for (int i = 0; i < (signed)JIM_EXPR_OPERATORS_NUM; i++) {
        const char * const opname = Jim_ExprOperators[i].name;
        const int oplen = Jim_ExprOperators[i].namelen;
        if (opname == NULL || opname[0] != pc->p[0]) continue;
        if (oplen > bestLen && !strncmp(opname, pc->p, oplen)) {
            bestIdx = i + JIM_TT_EXPR_OP;
            bestLen = oplen;
        }
    }
    if (bestIdx == -1) return JIM_ERROR;
    // Validate parentheses around function arguments
    if (bestIdx >= JIM_EXPROP_FUNC_FIRST) {
        const char *p = pc->p + bestLen;
        int len = pc->len - bestLen;
        while (len && isspace(*p)) {
            len--;
            p++;
        }
        if (*p != '(') return JIM_ERROR;
    }
    pc->tend = pc->p + bestLen - 1;
    pc->p += bestLen;
    pc->len -= bestLen;
    pc->tt = bestIdx;
    return JIM_OK;
}

__constant__ static Jim_ExprOperator _dummy_op;
static __device__ const struct Jim_ExprOperator *JimExprOperatorInfoByOpcode(int opcode)
{
    return (opcode < JIM_TT_EXPR_OP ? &_dummy_op : &Jim_ExprOperators[opcode - JIM_TT_EXPR_OP]);
}

__constant__ static const char * const _tt_names[JIM_TT_EXPR_OP] = {
    "NIL", "STR", "ESC", "VAR", "ARY", "CMD", "SEP", "EOL", "EOF", "LIN", "WRD", "(((", ")))", ",,,", "INT", "DBL", "$()"
};
#ifdef __CUDACC__
__device__ char _jim_tt_name_buf[20];
#endif
__device__ const char *jim_tt_name(int type)
{
    if (type < JIM_TT_EXPR_OP) return _tt_names[type];
    const struct Jim_ExprOperator *op = JimExprOperatorInfoByOpcode(type);
#ifndef __CUDACC__
    static char _jim_tt_name_buf[20];
#endif
    if (op->name) return op->name;
    sprintf(_jim_tt_name_buf, "(%d)", type);
    return _jim_tt_name_buf;
}

#pragma endregion

// -----------------------------------------------------------------------------
// Expression Object
// -----------------------------------------------------------------------------
#pragma region Expression Object

static __device__ void FreeExprInternalRep(Jim_Interp *interp, Jim_Obj *objPtr);
static __device__ void DupExprInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr);
static __device__ int SetExprFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr);

__constant__ static const Jim_ObjType _exprObjType = {
    "expression",
    FreeExprInternalRep,
    DupExprInternalRep,
    NULL,
    JIM_TYPE_REFERENCES,
};

// Expr bytecode structure
typedef struct ExprByteCode {
    ScriptToken *token; // Tokens array
    int len;            // Length as number of tokens
    int inUse;          // Used for sharing
} ExprByteCode;
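// Editorial note: the token array holds the compiled expression in postfix (RPN)
// order, e.g. "1 + 2*3" becomes [INT 1] [INT 2] [INT 3] [OP *] [OP +]; this is the
// invariant that ExprCheckCorrectness() below verifies and that the stack VM in
// Jim_EvalExpression() executes later in this file.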
static __device__ void ExprFreeByteCode(Jim_Interp *interp, ExprByteCode *expr)
{
    for (int i = 0; i < expr->len; i++) Jim_DecrRefCount(interp, expr->token[i].objPtr);
    Jim_Free(expr->token);
    Jim_Free(expr);
}

static __device__ void FreeExprInternalRep(Jim_Interp *interp, Jim_Obj *objPtr)
{
    ExprByteCode *expr = (ExprByteCode *)objPtr->internalRep.ptr;
    if (expr) {
        if (--expr->inUse != 0) return;
        ExprFreeByteCode(interp, expr);
    }
}

static __device__ void DupExprInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr)
{
    JIM_NOTUSED(interp);
    JIM_NOTUSED(srcPtr);
    // Just returns a simple string
    dupPtr->typePtr = NULL;
}

// Check if an expr program looks correct
static __device__ int ExprCheckCorrectness(ExprByteCode *expr)
{
    int stacklen = 0;
    int ternary = 0;
    // Try to check if there are stack underflows, and make sure at the end of the program there is a single result on the stack.
    for (int i = 0; i < expr->len; i++) {
        ScriptToken *t = &expr->token[i];
        const struct Jim_ExprOperator *op = JimExprOperatorInfoByOpcode(t->type);
        stacklen -= op->arity;
        if (stacklen < 0) break;
        if (t->type == JIM_EXPROP_TERNARY || t->type == JIM_EXPROP_TERNARY_LEFT) ternary++;
        else if (t->type == JIM_EXPROP_COLON || t->type == JIM_EXPROP_COLON_LEFT) ternary--;
        // All operations and operands add one to the stack
        stacklen++;
    }
    return (stacklen != 1 || ternary != 0 ? JIM_ERROR : JIM_OK);
}

// This procedure converts every occurrence of the || and && operators into lazy unary versions.
//
// a b || is converted into:
//
//     a <offset> |L b |R
//
// a b && is converted into:
//
//     a <offset> &L b &R
//
// "|L" checks if 'a' is true:
//   1) if it is true pushes 1 and skips <offset> instructions to reach the opcode just after |R.
//   2) if it is false does nothing.
// "|R" checks if 'b' is true:
//   1) if it is true pushes 1, otherwise pushes 0.
//
// "&L" checks if 'a' is true:
//   1) if it is true does nothing.
//   2) If it is false pushes 0 and skips <offset> instructions to reach the opcode just after &R
// "&R" checks if 'a' is true:
//   if it is true pushes 1, otherwise pushes 0.
static __device__ int ExprAddLazyOperator(Jim_Interp *interp, ExprByteCode *expr, ParseToken *t)
{
    // Search for the end of the first operator
    int leftindex = expr->len - 1;
    int arity = 1;
    while (arity) {
        ScriptToken *tt = &expr->token[leftindex];
        if (tt->type >= JIM_TT_EXPR_OP) arity += JimExprOperatorInfoByOpcode(tt->type)->arity;
        arity--;
        if (--leftindex < 0) return JIM_ERROR;
    }
    leftindex++;
    // Move them up
    memmove(&expr->token[leftindex + 2], &expr->token[leftindex], sizeof(*expr->token) * (expr->len - leftindex));
    expr->len += 2;
    int offset = (expr->len - leftindex) - 1;
    // Now we rely on the fact that the left and right versions have opcodes 1 and 2 after the main opcode, respectively
    expr->token[leftindex + 1].type = t->type + 1;
    expr->token[leftindex + 1].objPtr = interp->emptyObj;
    expr->token[leftindex].type = JIM_TT_EXPR_INT;
    expr->token[leftindex].objPtr = Jim_NewIntObj(interp, offset);
    // Now add the 'R' operator
    expr->token[expr->len].objPtr = interp->emptyObj;
    expr->token[expr->len].type = t->type + 2;
    expr->len++;
    // Do we need to adjust the skip count for any &L, |L, ?L or :L in the left operand?
    for (int i = leftindex - 1; i > 0; i--) {
        const struct Jim_ExprOperator *op = JimExprOperatorInfoByOpcode(expr->token[i].type);
        if (op->lazy == LAZY_LEFT)
            if (JimWideValue(expr->token[i - 1].objPtr) + i - 1 >= leftindex)
                JimWideValue(expr->token[i - 1].objPtr) += 2;
    }
    return JIM_OK;
}

static __device__ int ExprAddOperator(Jim_Interp *interp, ExprByteCode * expr, ParseToken *t)
{
    struct ScriptToken *token = &expr->token[expr->len];
    const struct Jim_ExprOperator *op = JimExprOperatorInfoByOpcode(t->type);
    if (op->lazy == LAZY_OP) {
        if (ExprAddLazyOperator(interp, expr, t) != JIM_OK) {
            Jim_SetResultFormatted(interp, "Expression has bad operands to %s", op->name);
            return JIM_ERROR;
        }
    }
    else {
        token->objPtr = interp->emptyObj;
        token->type = t->type;
        expr->len++;
    }
    return JIM_OK;
}

// Returns the index of the COLON_LEFT to the left of 'right_index' taking into account nesting.
// The expression *must* be well formed, thus a COLON_LEFT will always be found.
static __device__ int ExprTernaryGetColonLeftIndex(ExprByteCode *expr, int right_index)
{
    int ternary_count = 1;
    right_index--;
    while (right_index > 1) {
        if (expr->token[right_index].type == JIM_EXPROP_TERNARY_LEFT) ternary_count--;
        else if (expr->token[right_index].type == JIM_EXPROP_COLON_RIGHT) ternary_count++;
        else if (expr->token[right_index].type == JIM_EXPROP_COLON_LEFT && ternary_count == 1) return right_index;
        right_index--;
    }
    return -1; // notreached
}

// Find the left/right indices for the ternary expression to the left of 'right_index'.
// Returns 1 if found, and fills in *prev_right_index and *prev_left_index. Otherwise returns 0.
static __device__ int ExprTernaryGetMoveIndices(ExprByteCode *expr, int right_index, int *prev_right_index, int *prev_left_index)
{
    int i = right_index - 1;
    int ternary_count = 1;
    while (i > 1) {
        if (expr->token[i].type == JIM_EXPROP_TERNARY_LEFT) {
            if (--ternary_count == 0 && expr->token[i - 2].type == JIM_EXPROP_COLON_RIGHT) {
                *prev_right_index = i - 2;
                *prev_left_index = ExprTernaryGetColonLeftIndex(expr, *prev_right_index);
                return 1;
            }
        }
        else if (expr->token[i].type == JIM_EXPROP_COLON_RIGHT) {
            if (ternary_count == 0) return 0;
            ternary_count++;
        }
        i--;
    }
    return 0;
}
// ExprTernaryReorderExpression description
// ========================================
// ?: is right-to-left associative which doesn't work with the stack-based expression engine. The fix is to reorder the bytecode.
//
// The expression:
//     expr 1?2:0?3:4
//
// Has initial bytecode:
//     '1' '2' (40=TERNARY_LEFT) '2' (41=TERNARY_RIGHT) '2' (43=COLON_LEFT) '0' (44=COLON_RIGHT)
//     '2' (40=TERNARY_LEFT) '3' (41=TERNARY_RIGHT) '2' (43=COLON_LEFT) '4' (44=COLON_RIGHT)
//
// The fix involves simulating this expression instead:
//     expr 1?2:(0?3:4)
//
// With the following bytecode:
//     '1' '2' (40=TERNARY_LEFT) '2' (41=TERNARY_RIGHT) '10' (43=COLON_LEFT) '0' '2' (40=TERNARY_LEFT)
//     '3' (41=TERNARY_RIGHT) '2' (43=COLON_LEFT) '4' (44=COLON_RIGHT) (44=COLON_RIGHT)
//
// i.e. The token COLON_RIGHT at index 8 is moved towards the end of the stack, all tokens above 8 are shifted down and the skip count of the token JIM_EXPROP_COLON_LEFT at index 5 is
// incremented by the number of tokens shifted down. The token JIM_EXPROP_COLON_RIGHT that is moved is identified as immediately preceding a token JIM_EXPROP_TERNARY_LEFT
//
// ExprTernaryReorderExpression thus works as follows:
// - start from the end of the stack
// - while walking towards the beginning of the stack
//     if token=JIM_EXPROP_COLON_RIGHT then
//         find the associated token JIM_EXPROP_TERNARY_LEFT, which allows to
//             find the associated token previous(JIM_EXPROP_COLON_RIGHT)
//             find the associated token previous(JIM_EXPROP_LEFT_RIGHT)
//         if all found then
//             perform the rotation
//             update the skip count of the token previous(JIM_EXPROP_LEFT_RIGHT)
//         end if
//     end if
//
// Note: care has to be taken for nested ternary constructs!!!
static __device__ void ExprTernaryReorderExpression(Jim_Interp *interp, ExprByteCode *expr)
{
    for (int i = expr->len - 1; i > 1; i--) {
        if (expr->token[i].type != JIM_EXPROP_COLON_RIGHT) continue;
        // COLON_RIGHT found: get the indexes needed to move the tokens in the stack (if any)
        int prev_right_index;
        int prev_left_index;
        if (ExprTernaryGetMoveIndices(expr, i, &prev_right_index, &prev_left_index) == 0) continue;
        // rotate tokens down
        //
        // +-> [i] : JIM_EXPROP_COLON_RIGHT
        // |    |       |
        // |    V       V
        // |   [...]  : ...
        // |    |       |
        // |    V       V
        // |   [...]  : ...
        // |    |       |
        // |    V       V
        // +- [prev_right_index] : JIM_EXPROP_COLON_RIGHT
        ScriptToken tmp = expr->token[prev_right_index];
        for (int j = prev_right_index; j < i; j++) expr->token[j] = expr->token[j + 1];
        expr->token[i] = tmp;
        // Increment the 'skip' count associated to the previous JIM_EXPROP_COLON_LEFT token
        // This is 'colon left increment' = i - prev_right_index
        // [prev_left_index]   : JIM_EXPROP_LEFT_RIGHT
        // [prev_left_index-1] : skip_count
        JimWideValue(expr->token[prev_left_index-1].objPtr) += (i - prev_right_index);
        // Adjust for i-- in the loop
        i++;
    }
}
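// Editorial note: ExprCreateByteCode() below is essentially the classic
// shunting-yard algorithm: operands are emitted to the output as they are read,
// while operators wait on 'stack' and are flushed to the output whenever the
// operator on top has greater or equal precedence than the incoming one (no
// flushing happens before a unary operator is pushed, and parentheses delimit
// subexpressions).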
static __device__ ExprByteCode *ExprCreateByteCode(Jim_Interp *interp, const ParseTokenList *tokenlist, Jim_Obj *fileNameObj)
{
    int ok = 1;
    int i;
    int prevtt = JIM_TT_NONE;
    int have_ternary = 0;
    // -1 for EOL
    int count = tokenlist->count - 1;
    ExprByteCode *expr = (ExprByteCode *)Jim_Alloc(sizeof(*expr));
    expr->inUse = 1;
    expr->len = 0;
    Jim_Stack stack;
    Jim_InitStack(&stack);
    // Need extra bytecodes for lazy operators. Also check for the ternary operator
    for (i = 0; i < tokenlist->count; i++) {
        ParseToken *t = &tokenlist->list[i];
        const struct Jim_ExprOperator *op = JimExprOperatorInfoByOpcode(t->type);
        if (op->lazy == LAZY_OP) {
            count += 2;
            // Ternary is a lazy op but also needs reordering
            if (t->type == JIM_EXPROP_TERNARY) have_ternary = 1;
        }
    }
    expr->token = (ScriptToken *)Jim_Alloc(sizeof(ScriptToken) * count);
    for (i = 0; i < tokenlist->count && ok; i++) {
        ParseToken *t = &tokenlist->list[i];
        // Next token will be stored here
        struct ScriptToken *token = &expr->token[expr->len];
        if (t->type == JIM_TT_EOL) break;
        switch (t->type) {
        case JIM_TT_STR:
        case JIM_TT_ESC:
        case JIM_TT_VAR:
        case JIM_TT_DICTSUGAR:
        case JIM_TT_EXPRSUGAR:
        case JIM_TT_CMD:
            token->type = t->type;
strexpr:
            token->objPtr = Jim_NewStringObj(interp, t->token, t->len);
            // Only commands need source info
            if (t->type == JIM_TT_CMD) JimSetSourceInfo(interp, token->objPtr, fileNameObj, t->line);
            expr->len++;
            break;
        case JIM_TT_EXPR_INT:
        case JIM_TT_EXPR_DOUBLE: {
            char *endptr;
            if (t->type == JIM_TT_EXPR_INT) token->objPtr = Jim_NewIntObj(interp, jim_strtoull(t->token, &endptr));
            else token->objPtr = Jim_NewDoubleObj(interp, strtod(t->token, &endptr));
            if (endptr != t->token + t->len) {
                // Conversion failed, so just store it as a string
                Jim_FreeNewObj(interp, token->objPtr);
                token->type = JIM_TT_STR;
                goto strexpr;
            }
            token->type = t->type;
            expr->len++;
            break;
        }
        case JIM_TT_SUBEXPR_START:
            Jim_StackPush(&stack, t);
            prevtt = JIM_TT_NONE;
            continue;
        case JIM_TT_SUBEXPR_COMMA:
            continue; // Simple approach. Comma is simply ignored
        case JIM_TT_SUBEXPR_END:
            ok = 0;
            while (Jim_StackLen(&stack)) {
                ParseToken *tt = (ParseToken *)Jim_StackPop(&stack);
                if (tt->type == JIM_TT_SUBEXPR_START) {
                    ok = 1;
                    break;
                }
                if (ExprAddOperator(interp, expr, tt) != JIM_OK) goto err;
            }
            if (!ok) {
                Jim_SetResultString(interp, "Unexpected close parenthesis", -1);
                goto err;
            }
            break;
        default: {
            // Must be an operator
            // Convert -/+ to unary minus or unary plus if necessary
            if (prevtt == JIM_TT_NONE || prevtt >= JIM_TT_EXPR_OP) {
                if (t->type == JIM_EXPROP_SUB) t->type = JIM_EXPROP_UNARYMINUS;
                else if (t->type == JIM_EXPROP_ADD) t->type = JIM_EXPROP_UNARYPLUS;
            }
            const struct Jim_ExprOperator *op = JimExprOperatorInfoByOpcode(t->type);
            // Now handle precedence
            ParseToken *tt;
            while ((tt = (ParseToken *)Jim_StackPeek(&stack)) != NULL) {
                const struct Jim_ExprOperator *tt_op = JimExprOperatorInfoByOpcode(tt->type);
                // Note that right-to-left associativity of the ?: operator is handled later
                if (op->arity != 1 && tt_op->precedence >= op->precedence) {
                    if (ExprAddOperator(interp, expr, tt) != JIM_OK) {
                        ok = 0;
                        goto err;
                    }
                    Jim_StackPop(&stack);
                }
                else break;
            }
            Jim_StackPush(&stack, t);
            break;
        }
        }
        prevtt = t->type;
    }
    // Reduce any remaining subexpr
    while (Jim_StackLen(&stack)) {
        ParseToken *tt = (ParseToken *)Jim_StackPop(&stack);
        if (tt->type == JIM_TT_SUBEXPR_START) {
            ok = 0;
            Jim_SetResultString(interp, "Missing close parenthesis", -1);
            goto err;
        }
        if (ExprAddOperator(interp, expr, tt) != JIM_OK) {
            ok = 0;
            goto err;
        }
    }
    if (have_ternary) ExprTernaryReorderExpression(interp, expr);
err:
    // Free the stack used for the compilation
    Jim_FreeStack(&stack);
    for (i = 0; i < expr->len; i++) Jim_IncrRefCount(expr->token[i].objPtr);
    if (!ok) {
        ExprFreeByteCode(interp, expr);
        return NULL;
    }
    return expr;
}

// This method takes the string representation of an expression and generates a program for the Expr's stack-based VM.
static __device__ int SetExprFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr)
{
    int rc = JIM_ERROR;
    // Try to get information about filename / line number
    int line;
    Jim_Obj *fileNameObj;
    if (objPtr->typePtr == &_sourceObjType) {
        fileNameObj = objPtr->internalRep.sourceValue.fileNameObj;
        line = objPtr->internalRep.sourceValue.lineNumber;
    }
    else {
        fileNameObj = interp->emptyObj;
        line = 1;
    }
    Jim_IncrRefCount(fileNameObj);
    int exprTextLen;
    const char *exprText = Jim_GetString(objPtr, &exprTextLen);
    // Initially tokenise the expression into tokenlist
    ParseTokenList tokenlist;
    ScriptTokenListInit(&tokenlist);
    struct JimParserCtx parser;
    JimParserInit(&parser, exprText, exprTextLen, line);
    struct ExprByteCode *expr;
    while (!parser.eof) {
        if (JimParseExpression(&parser) != JIM_OK) {
            ScriptTokenListFree(&tokenlist);
invalidexpr:
            Jim_SetResultFormatted(interp, "syntax error in expression: \"%#s\"", objPtr);
            expr = NULL;
            goto err;
        }
        ScriptAddToken(&tokenlist, parser.tstart, (int)(parser.tend - parser.tstart + 1), parser.tt, parser.tline);
    }
#ifdef DEBUG_SHOW_EXPR_TOKENS
    {
        printf("==== Expr Tokens (%s) ====\n", Jim_String(fileNameObj));
        for (int i = 0; i < tokenlist.count; i++)
            printf("[%2d]@%d %s '%.*s'\n", i, tokenlist.list[i].line, jim_tt_name(tokenlist.list[i].type), tokenlist.list[i].len, tokenlist.list[i].token);
    }
#endif
    if (JimParseCheckMissing(interp, parser.missing.ch) == JIM_ERROR) {
        ScriptTokenListFree(&tokenlist);
        Jim_DecrRefCount(interp, fileNameObj);
        return JIM_ERROR;
    }
    // Now create the expression bytecode from the tokenlist
    expr = ExprCreateByteCode(interp, &tokenlist, fileNameObj);
    // No longer need the token list
    ScriptTokenListFree(&tokenlist);
    if (!expr) goto err;
#ifdef DEBUG_SHOW_EXPR
    {
        printf("==== Expr ====\n");
        for (int i = 0; i < expr->len; i++) {
            ScriptToken *t = &expr->token[i];
            printf("[%2d] %s '%s'\n", i, jim_tt_name(t->type), Jim_String(t->objPtr));
        }
    }
#endif
    // Check program correctness
    if (ExprCheckCorrectness(expr) != JIM_OK) {
        ExprFreeByteCode(interp, expr);
        goto invalidexpr;
    }
    rc = JIM_OK;
err:
    // Free the old internal rep and set the new one
    Jim_DecrRefCount(interp, fileNameObj);
    Jim_FreeIntRep(interp, objPtr);
    Jim_SetIntRepPtr(objPtr, expr);
    objPtr->typePtr = &_exprObjType;
    return rc;
}

static __device__ ExprByteCode *JimGetExpression(Jim_Interp *interp, Jim_Obj *objPtr)
{
    if (objPtr->typePtr != &_exprObjType)
        if (SetExprFromAny(interp, objPtr) != JIM_OK) return NULL;
    return (ExprByteCode *)Jim_GetIntRepPtr(objPtr);
}

#ifdef JIM_OPTIMIZATION
static __device__ Jim_Obj *JimExprIntValOrVar(Jim_Interp *interp, const ScriptToken *token)
{
    if (token->type == JIM_TT_EXPR_INT) return token->objPtr;
    else if (token->type == JIM_TT_VAR) return Jim_GetVariable(interp, token->objPtr, JIM_NONE);
    else if (token->type == JIM_TT_DICTSUGAR) return JimExpandDictSugar(interp, token->objPtr);
    else return NULL;
}
#endif
// -----------------------------------------------------------------------------
// Expressions evaluation.
// Jim uses a specialized stack-based virtual machine for expressions, which takes advantage of the fact that expr's operators can't be redefined.
// Jim_EvalExpression() uses the bytecode compiled by the SetExprFromAny() method of the "expression" object.
// On success a Tcl Object containing the result of the evaluation is stored into exprResultPtrPtr (having a refcount of 1), and JIM_OK is returned.
// On error the function returns a retcode different from JIM_OK and sets a suitable error on the interp.
// -----------------------------------------------------------------------------
#define JIM_EE_STATICSTACK_LEN 10

__device__ int Jim_EvalExpression(Jim_Interp *interp, Jim_Obj *exprObjPtr, Jim_Obj **exprResultPtrPtr)
{
    Jim_Obj *staticStack[JIM_EE_STATICSTACK_LEN];
    int i;
    int retcode = JIM_OK;
    struct JimExprState e;
    ExprByteCode *expr = JimGetExpression(interp, exprObjPtr);
    if (!expr) return JIM_ERROR; // error in expression
#ifdef JIM_OPTIMIZATION
    // Check for one of the following common expressions used by while/for
    //   CONST
    //   $a
    //   !$a
    //   $a < CONST, $a < $b
    //   $a <= CONST, $a <= $b
    //   $a > CONST, $a > $b
    //   $a >= CONST, $a >= $b
    //   $a != CONST, $a != $b
    //   $a == CONST, $a == $b
    {
        Jim_Obj *objPtr;
        // STEP 1 -- Check if the conditions hold to run the specialized version of while
        switch (expr->len) {
        case 1:
            objPtr = JimExprIntValOrVar(interp, &expr->token[0]);
            if (objPtr) {
                Jim_IncrRefCount(objPtr);
                *exprResultPtrPtr = objPtr;
                return JIM_OK;
            }
            break;
        case 2:
            if (expr->token[1].type == JIM_EXPROP_NOT) {
                objPtr = JimExprIntValOrVar(interp, &expr->token[0]);
                if (objPtr && JimIsWide(objPtr)) {
                    *exprResultPtrPtr = (JimWideValue(objPtr) ? interp->falseObj : interp->trueObj);
                    Jim_IncrRefCount(*exprResultPtrPtr);
                    return JIM_OK;
                }
            }
            break;
        case 3:
            objPtr = JimExprIntValOrVar(interp, &expr->token[0]);
            if (objPtr && JimIsWide(objPtr)) {
                Jim_Obj *objPtr2 = JimExprIntValOrVar(interp, &expr->token[1]);
                if (objPtr2 && JimIsWide(objPtr2)) {
                    jim_wide wideValueA = JimWideValue(objPtr);
                    jim_wide wideValueB = JimWideValue(objPtr2);
                    int cmpRes;
                    switch (expr->token[2].type) {
                    case JIM_EXPROP_LT: cmpRes = wideValueA < wideValueB; break;
                    case JIM_EXPROP_LTE: cmpRes = wideValueA <= wideValueB; break;
                    case JIM_EXPROP_GT: cmpRes = wideValueA > wideValueB; break;
                    case JIM_EXPROP_GTE: cmpRes = wideValueA >= wideValueB; break;
                    case JIM_EXPROP_NUMEQ: cmpRes = wideValueA == wideValueB; break;
                    case JIM_EXPROP_NUMNE: cmpRes = wideValueA != wideValueB; break;
                    default: goto noopt;
                    }
                    *exprResultPtrPtr = (cmpRes ? interp->trueObj : interp->falseObj);
                    Jim_IncrRefCount(*exprResultPtrPtr);
                    return JIM_OK;
                }
            }
            break;
        }
    }
noopt:
#endif
    // To avoid the internal repr being freed due to shimmering of the exprObjPtr object, we make the internal rep shared.
    expr->inUse++;
    // The stack-based expr VM itself
    // Stack allocation. Expr programs have the feature that a program of length N can't require a stack longer than N.
    e.stack = (expr->len > JIM_EE_STATICSTACK_LEN ? (Jim_Obj **)Jim_Alloc(sizeof(Jim_Obj *) * expr->len) : staticStack);
    e.stacklen = 0;
    // Execute every instruction
    Jim_Obj *objPtr;
    for (i = 0; i < expr->len && retcode == JIM_OK; i++)
        switch (expr->token[i].type) {
        case JIM_TT_EXPR_INT:
        case JIM_TT_EXPR_DOUBLE:
        case JIM_TT_STR:
            ExprPush(&e, expr->token[i].objPtr);
            break;
        case JIM_TT_VAR:
            objPtr = Jim_GetVariable(interp, expr->token[i].objPtr, JIM_ERRMSG);
            if (objPtr) ExprPush(&e, objPtr);
            else retcode = JIM_ERROR;
            break;
        case JIM_TT_DICTSUGAR:
            objPtr = JimExpandDictSugar(interp, expr->token[i].objPtr);
            if (objPtr) ExprPush(&e, objPtr);
            else retcode = JIM_ERROR;
            break;
        case JIM_TT_ESC:
            retcode = Jim_SubstObj(interp, expr->token[i].objPtr, &objPtr, JIM_NONE);
            if (retcode == JIM_OK) ExprPush(&e, objPtr);
            break;
        case JIM_TT_CMD:
            retcode = Jim_EvalObj(interp, expr->token[i].objPtr);
            if (retcode == JIM_OK) ExprPush(&e, Jim_GetResult(interp));
            break;
        default: {
            // Find and execute the operation
            e.skip = 0;
            e.opcode = expr->token[i].type;
            retcode = JimExprOperatorInfoByOpcode(e.opcode)->funcop(interp, &e);
            // Skip some opcodes if necessary
            i += e.skip;
            continue;
        }
        }
    expr->inUse--;
    if (retcode == JIM_OK) *exprResultPtrPtr = ExprPop(&e);
    else for (i = 0; i < e.stacklen; i++) Jim_DecrRefCount(interp, e.stack[i]);
    if (e.stack != staticStack) Jim_Free(e.stack);
    return retcode;
}

__device__ int Jim_GetBoolFromExpr(Jim_Interp *interp, Jim_Obj *exprObjPtr, int *boolPtr)
{
    jim_wide wideValue;
    double doubleValue;
    Jim_Obj *exprResultPtr;
    int retcode = Jim_EvalExpression(interp, exprObjPtr, &exprResultPtr);
    if (retcode != JIM_OK) return retcode;
    if (JimGetWideNoErr(interp, exprResultPtr, &wideValue) != JIM_OK) {
        if (Jim_GetDouble(interp, exprResultPtr, &doubleValue) != JIM_OK) {
            Jim_DecrRefCount(interp, exprResultPtr);
            return JIM_ERROR;
        }
        else {
            Jim_DecrRefCount(interp, exprResultPtr);
            *boolPtr = doubleValue != 0;
            return JIM_OK;
        }
    }
    *boolPtr = wideValue != 0;
    Jim_DecrRefCount(interp, exprResultPtr);
    return JIM_OK;
}
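// Usage sketch (editorial; illustrative only and kept out of the build, so the
// hypothetical ExampleEvalExpr() is not part of the original file):
#if 0
static __device__ int ExampleEvalExpr(Jim_Interp *interp)
{
    // Evaluate "3*(2+1) > 8" once as a value and once as a boolean
    Jim_Obj *exprObj = Jim_NewStringObj(interp, "3*(2+1) > 8", -1);
    Jim_IncrRefCount(exprObj);
    Jim_Obj *resultObj;
    int rc = Jim_EvalExpression(interp, exprObj, &resultObj);
    if (rc == JIM_OK)
        Jim_DecrRefCount(interp, resultObj); // the result arrives with a refcount of 1
    int b;
    if (rc == JIM_OK)
        rc = Jim_GetBoolFromExpr(interp, exprObj, &b);
    Jim_DecrRefCount(interp, exprObj);
    return rc;
}
#endif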
#pragma endregion

// -----------------------------------------------------------------------------
// ScanFormat String Object
// -----------------------------------------------------------------------------
#pragma region ScanFormat String Object

// This Jim_Obj will hold a parsed representation of a format string passed to the Jim_ScanString command. For error diagnostics, the scanformat string has
// to be parsed in its entirety first and then, if correct, can be used for scanning. To avoid endless re-parsing, the parsed representation will be
// stored in an internal representation and re-used for performance reasons.
// A ScanFmtPartDescr will hold the information of /one/ part of the whole scanformat string. This part will later be used to extract information
// out from the string to be parsed by Jim_ScanString
typedef struct ScanFmtPartDescr {
    char *arg;     // Specification of a CHARSET conversion
    char *prefix;  // Prefix to be scanned literally before conversion
    size_t width;  // Maximal width of input to be converted
    int pos;       // -1 - no assign, 0 - natural pos, >0 - XPG3 pos
    char type;     // Type of conversion (e.g. c, d, f)
    char modifier; // Modify type (e.g. l - long, h - short)
} ScanFmtPartDescr;

// The ScanFmtStringObj will hold the internal representation of a scanformat string parsed and separated in part descriptions. Furthermore it contains
// the original string representation of the scanformat string to allow for fast update of the Jim_Obj's string representation part.
// As an add-on the internal object representation adds some scratch pad area for usage by Jim_ScanString to avoid endless allocating and freeing of memory for the purpose of string scanning.
// The error member points to a statically allocated string in case of a malformed scanformat string, or it contains '0' (NULL) in case of a valid parse representation.
// The whole memory of the internal representation is allocated as a single area of memory that will be internally separated. So freeing and duplicating of such an object is cheap
typedef struct ScanFmtStringObj {
    jim_wide size;             // Size of internal repr in bytes
    char *stringRep;           // Original string representation
    size_t count;              // Number of ScanFmtPartDescr contained
    size_t convCount;          // Number of conversions that will assign
    size_t maxPos;             // Max position index if XPG3 is used
    const char *error;         // Ptr to error text (NULL if no error)
    char *scratch;             // Some scratch pad used by Jim_ScanString
    ScanFmtPartDescr descr[1]; // The vector of partial descriptions
} ScanFmtStringObj;

static __device__ void FreeScanFmtInternalRep(Jim_Interp *interp, Jim_Obj *objPtr);
static __device__ void DupScanFmtInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr);
static __device__ void UpdateStringOfScanFmt(Jim_Obj *objPtr);

__constant__ static const Jim_ObjType _scanFmtStringObjType = {
    "scanformatstring",
    FreeScanFmtInternalRep,
    DupScanFmtInternalRep,
    UpdateStringOfScanFmt,
    JIM_TYPE_NONE,
};

__device__ void FreeScanFmtInternalRep(Jim_Interp *interp, Jim_Obj *objPtr)
{
    JIM_NOTUSED(interp);
    Jim_Free((char *)objPtr->internalRep.ptr);
    objPtr->internalRep.ptr = 0;
}

__device__ void DupScanFmtInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr)
{
    JIM_NOTUSED(interp);
    size_t size = (size_t)((ScanFmtStringObj *)srcPtr->internalRep.ptr)->size;
    ScanFmtStringObj *newVec = (ScanFmtStringObj *)Jim_Alloc((int)size);
    memcpy(newVec, srcPtr->internalRep.ptr, size);
    dupPtr->internalRep.ptr = newVec;
    dupPtr->typePtr = &_scanFmtStringObjType;
}

static __device__ void UpdateStringOfScanFmt(Jim_Obj *objPtr)
{
    JimSetStringBytes(objPtr, ((ScanFmtStringObj *)objPtr->internalRep.ptr)->stringRep);
}

// SetScanFmtFromAny will parse a given string and create the internal representation of the format specification. In case of an error
// the error data member of the internal representation will be set to a descriptive error text and the function will be left with
// JIM_ERROR to indicate unsuccessful parsing (aka. a malformed scanformat specification).
static __device__ int SetScanFmtFromAny(Jim_Interp *interp, Jim_Obj *objPtr)
{
    int maxCount, i, lastPos = -1;
    const char *fmt = objPtr->bytes;
    int maxFmtLen = objPtr->length;
    const char *fmtEnd = fmt + maxFmtLen;
    int curr;
    Jim_FreeIntRep(interp, objPtr);
    // Count how many conversions could take place maximally
    for (i = 0, maxCount = 0; i < maxFmtLen; ++i)
        if (fmt[i] == '%') ++maxCount;
    // Calculate an approximation of the memory necessary
    int approxSize = sizeof(ScanFmtStringObj)       // Size of the container
        + (maxCount + 1) * sizeof(ScanFmtPartDescr) // Size of all partials
        + maxFmtLen * sizeof(char) + 3 + 1          // Scratch + "%n" + '\0'
        + maxFmtLen * sizeof(char) + 1              // Original stringrep
        + maxFmtLen * sizeof(char)                  // Arg for CHARSETs
        + (maxCount + 1) * sizeof(char)             // '\0' for every partial
        + 1;                                        // safety byte
    ScanFmtStringObj *fmtObj = (ScanFmtStringObj *)Jim_Alloc(approxSize);
    memset(fmtObj, 0, approxSize);
    fmtObj->size = approxSize;
    fmtObj->maxPos = 0;
    fmtObj->scratch = (char *)&fmtObj->descr[maxCount + 1];
    fmtObj->stringRep = fmtObj->scratch + maxFmtLen + 3 + 1;
    memcpy(fmtObj->stringRep, fmt, maxFmtLen);
    char *buffer = fmtObj->stringRep + maxFmtLen + 1;
    objPtr->internalRep.ptr = fmtObj;
    objPtr->typePtr = &_scanFmtStringObjType;
    for (i = 0, curr = 0; fmt < fmtEnd; ++fmt) {
        int width = 0, skip;
        ScanFmtPartDescr *descr = &fmtObj->descr[curr];
        fmtObj->count++;
        descr->width = 0; // Assume width unspecified
        // Overread and store any "literal" prefix
        if (*fmt != '%' || fmt[1] == '%') {
            descr->type = 0;
            descr->prefix = &buffer[i];
            for (; fmt < fmtEnd; ++fmt) {
                if (*fmt == '%') {
                    if (fmt[1] != '%') break;
                    ++fmt;
                }
                buffer[i++] = *fmt;
            }
            buffer[i++] = 0;
        }
        // Skip the conversion introducing '%' sign
        ++fmt;
        // End reached due to non-conversion literal only?
        if (fmt >= fmtEnd) goto done;
        descr->pos = 0; // Assume "natural" positioning
        if (*fmt == '*') {
            descr->pos = -1; // Okay, conversion will not be assigned
            ++fmt;
        }
        else fmtObj->convCount++; // Otherwise count as assign-conversion
        // Check if next token is a number (could be width or pos)
        if (sscanf(fmt, "%d%n", &width, &skip) == 1) {
            fmt += skip;
            // Was the number an XPG3 position specifier?
            if (descr->pos != -1 && *fmt == '$') {
                int prev;
                ++fmt;
                descr->pos = width;
                width = 0;
                // Look if "natural" positioning and the XPG3 one were mixed
                if ((lastPos == 0 && descr->pos > 0) || (lastPos > 0 && descr->pos == 0)) {
                    fmtObj->error = "cannot mix \"%\" and \"%n$\" conversion specifiers";
                    return JIM_ERROR;
                }
                // Look if this position was already used
                for (prev = 0; prev < curr; ++prev) {
                    if (fmtObj->descr[prev].pos == -1) continue;
                    if (fmtObj->descr[prev].pos == descr->pos) {
                        fmtObj->error = "variable is assigned by multiple \"%n$\" conversion specifiers";
                        return JIM_ERROR;
                    }
                }
                // Try to find a width after the XPG3 specifier
                if (sscanf(fmt, "%d%n", &width, &skip) == 1) {
                    descr->width = width;
                    fmt += skip;
                }
                if (descr->pos > 0 && (size_t) descr->pos > fmtObj->maxPos) fmtObj->maxPos = descr->pos;
            }
            // Number was not an XPG3 one, so it has to be a width
            else descr->width = width;
        }
        // If positioning mode was undetermined yet, fix this
        if (lastPos == -1) lastPos = descr->pos;
        // Handle CHARSET conversion type ...
        if (*fmt == '[') {
            int swapped = 1, beg = i, end, j;
            descr->type = '[';
            descr->arg = &buffer[i];
            ++fmt;
            if (*fmt == '^') buffer[i++] = *fmt++;
            if (*fmt == ']') buffer[i++] = *fmt++;
            while (*fmt && *fmt != ']') buffer[i++] = *fmt++;
            if (*fmt != ']') {
                fmtObj->error = "unmatched [ in format string";
                return JIM_ERROR;
            }
            end = i;
            buffer[i++] = 0;
            // In case a range fence was given "backwards", swap it
            while (swapped) {
                swapped = 0;
                for (j = beg + 1; j < end - 1; ++j)
                    if (buffer[j] == '-' && buffer[j - 1] > buffer[j + 1]) {
                        char tmp = buffer[j - 1];
                        buffer[j - 1] = buffer[j + 1];
                        buffer[j + 1] = tmp;
                        swapped = 1;
                    }
            }
        }
        else {
            // Remember any valid modifier if given
            if (strchr("hlL", *fmt) != 0) descr->modifier = _tolower((int)*fmt++);
            descr->type = *fmt;
            if (!strchr("efgcsndoxui", *fmt)) {
                fmtObj->error = "bad scan conversion character";
                return JIM_ERROR;
            }
            else if (*fmt == 'c' && descr->width != 0) {
                fmtObj->error = "field width may not be specified in %c " "conversion";
                return JIM_ERROR;
            }
            else if (*fmt == 'u' && descr->modifier == 'l') {
                fmtObj->error = "unsigned wide not supported";
                return JIM_ERROR;
            }
        }
        curr++;
    }
done:
    return JIM_OK;
}

// Some accessor macros to allow low-level access to fields of internal repr
#define FormatGetCnvCount(_fo_) ((ScanFmtStringObj*)((_fo_)->internalRep.ptr))->convCount
#define FormatGetMaxPos(_fo_) ((ScanFmtStringObj*)((_fo_)->internalRep.ptr))->maxPos
#define FormatGetError(_fo_) ((ScanFmtStringObj*)((_fo_)->internalRep.ptr))->error

// JimScanAString is used to scan an unspecified string that ends with the next WS, or a string that is specified via a charset.
static __device__ Jim_Obj *JimScanAString(Jim_Interp *interp, const char *sdescr, const char *str)
{
    char *buffer = Jim_StrDup(str);
    char *p = buffer;
    while (*str) {
        int c;
        int n;
        if (!sdescr && isspace(*str)) break; // EOS via WS if unspecified
        n = utf8_tounicode(str, &c);
        if (sdescr && !JimCharsetMatch(sdescr, c, JIM_CHARSET_SCAN)) break;
        while (n--) *p++ = *str++;
    }
    *p = 0;
    return Jim_NewStringObjNoAlloc(interp, buffer, (int)(p - buffer));
}
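// Format example (editorial): the specification "%d %s %[a-f]" is parsed by
// SetScanFmtFromAny() above into three ScanFmtPartDescr entries (a decimal
// conversion, a whitespace-delimited string, and a charset match), while an XPG3
// form such as "%2$s" assigns to an explicit result slot instead of the natural
// position.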
// ScanOneEntry will scan one entry out of the string passed as argument. It uses the sscanf() function for this task. After extracting and
// converting the value, the count of scanned characters is returned, or -1 in case no conversion took place and the string was already scanned through.
static __device__ int ScanOneEntry(Jim_Interp *interp, const char *str, int pos, int strLen, ScanFmtStringObj * fmtObj, long idx, Jim_Obj **valObjPtr)
{
    const char *tok;
    const ScanFmtPartDescr *descr = &fmtObj->descr[idx];
    size_t scanned = 0;
    size_t anchor = pos;
    int i;
    Jim_Obj *tmpObj = NULL;
    // First, pessimistically assume we will not scan anything :-)
    *valObjPtr = 0;
    if (descr->prefix) {
        // There was a prefix given before the conversion, skip it and adjust the string-to-be-parsed accordingly
        for (i = 0; pos < strLen && descr->prefix[i]; ++i) {
            // If the prefix requires it, skip WS
            if (isspace(descr->prefix[i]))
                while (pos < strLen && isspace(str[pos])) ++pos;
            // Prefix does not match here; leave the loop
            else if (descr->prefix[i] != str[pos]) break;
            // Prefix matched so far, next round
            else ++pos;
        }
        // All of str consumed: EOF condition
        if (pos >= strLen) return -1;
        // Not whole prefix consumed, no conversion possible
        else if (descr->prefix[i] != 0) return 0;
    }
    // For all but the following conversions, skip leading WS
    if (descr->type != 'c' && descr->type != '[' && descr->type != 'n')
        while (isspace(str[pos])) ++pos;
    // Determine how much was skipped/scanned so far
    scanned = pos - anchor;
    // %c is a special, simple case: no width
    // The 'n' pseudo conversion means: how much was scanned so far?
    if (descr->type == 'n') *valObjPtr = Jim_NewIntObj(interp, anchor + scanned);
    // Cannot scan anything, as str is totally consumed
    else if (pos >= strLen) return -1;
    else if (descr->type == 'c') {
        int c;
        scanned += utf8_tounicode(&str[pos], &c);
        *valObjPtr = Jim_NewIntObj(interp, c);
        return (int)scanned;
    }
    else {
        // Processing of conversions follows ...
        if (descr->width > 0) {
            // Do not try to scan as far as possible but only the given width. To ensure this, we copy the part that should be scanned.
            size_t sLen = utf8_strlen(&str[pos], strLen - pos);
            size_t tLen = descr->width > sLen ? sLen : descr->width;
            tmpObj = Jim_NewStringObjUtf8(interp, str + pos, (int)tLen);
            tok = tmpObj->bytes;
        }
        // As no width was given, simply refer to the original string
        else tok = &str[pos];
        switch (descr->type) {
        case 'd': case 'o': case 'x': case 'u': case 'i': {
            char *endp; // Position where the number finished
            int base = (descr->type == 'o' ? 8 : descr->type == 'x' ? 16 : descr->type == 'i' ? 0 : 10);
            // Try to scan a number with the given base
            jim_wide w = (base == 0 ? jim_strtoull(tok, &endp) : strtoull(tok, &endp, base));
            if (endp != tok) {
                // There was some number successfully scanned!
                *valObjPtr = Jim_NewIntObj(interp, w);
                // Adjust the number-of-chars scanned so far
                scanned += endp - tok;
            }
            // Nothing was scanned. We have to determine if this happened due to e.g. prefix mismatch or input str exhausted
            else scanned = (*tok ? 0 : -1);
            break;
        }
        case 's': case '[': {
            *valObjPtr = JimScanAString(interp, descr->arg, tok);
            scanned += Jim_Length(*valObjPtr);
            break;
        }
        case 'e': case 'f': case 'g': {
            char *endp;
            double value = strtod(tok, &endp);
            if (endp != tok) {
                // There was some number successfully scanned!
                *valObjPtr = Jim_NewDoubleObj(interp, value);
                // Adjust the number-of-chars scanned so far
                scanned += endp - tok;
            }
            // Nothing was scanned. We have to determine if this happened due to e.g. prefix mismatch or input str exhausted
            else scanned = (*tok ? 0 : -1);
            break;
        }
        }
        // If a substring was allocated (due to pre-defined width) do not forget to free it
        if (tmpObj) Jim_FreeNewObj(interp, tmpObj);
    }
    return (int)scanned;
}

// Jim_ScanString is the workhorse of string scanning. It will scan a given string and return all converted (and not ignored) values in a list back
// to the caller. If an error occurred, a NULL pointer will be returned.
__device__ Jim_Obj *Jim_ScanString(Jim_Interp *interp, Jim_Obj *strObjPtr, Jim_Obj *fmtObjPtr, int flags)
{
    int scanned = 1;
    const char *str = Jim_String(strObjPtr);
    int strLen = Jim_Utf8Length(interp, strObjPtr);
    Jim_Obj *resultList = 0;
    Jim_Obj *emptyStr = 0;
    // This should never happen. The format object should already be of the correct type
    JimPanic(fmtObjPtr->typePtr != &_scanFmtStringObjType, "Jim_ScanString() for non-scan format");
    ScanFmtStringObj *fmtObj = (ScanFmtStringObj *)fmtObjPtr->internalRep.ptr;
    // Check if format specification was valid
    if (fmtObj->error != 0) {
        if (flags & JIM_ERRMSG) Jim_SetResultString(interp, fmtObj->error, -1);
        return 0;
    }
    // Allocate a new "shared" empty string for all unassigned conversions
    emptyStr = Jim_NewEmptyStringObj(interp);
    Jim_IncrRefCount(emptyStr);
    // Create a list and fill it with empty strings up to max specified XPG3
    resultList = Jim_NewListObj(interp, NULL, 0);
    int resultc;
    Jim_Obj **resultVec = 0;
    size_t i, pos;
    if (fmtObj->maxPos > 0) {
        for (i = 0; i < fmtObj->maxPos; ++i) Jim_ListAppendElement(interp, resultList, emptyStr);
        JimListGetElements(interp, resultList, &resultc, &resultVec);
    }
    // Now handle every partial format description
    for (i = 0, pos = 0; i < fmtObj->count; ++i) {
        ScanFmtPartDescr *descr = &(fmtObj->descr[i]);
        Jim_Obj *value = 0;
        // Only the last type may be "literal" w/o conversion - skip it!
        if (descr->type == 0) continue;
        // As long as any conversion could be done, we will proceed
        if (scanned > 0) scanned = ScanOneEntry(interp, str, (int)pos, strLen, fmtObj, (long)i, &value);
        // In case our first try results in EOF, we will leave
        if (scanned == -1 && i == 0) goto eof;
        // Advance next pos-to-be-scanned for the amount scanned already
        pos += scanned;
        // value == 0 means no conversion took place so take empty string
        if (value == 0) value = Jim_NewEmptyStringObj(interp);
        // If value is a non-assignable one, skip it
        if (descr->pos == -1) Jim_FreeNewObj(interp, value);
        // Otherwise append it to the result list if no XPG3 was given
        else if (descr->pos == 0) Jim_ListAppendElement(interp, resultList, value);
        else if (resultVec[descr->pos - 1] == emptyStr) {
            // But due to the given XPG3 position, put the value into the corresponding slot
            Jim_DecrRefCount(interp, resultVec[descr->pos - 1]);
            Jim_IncrRefCount(value);
            resultVec[descr->pos - 1] = value;
        }
        else {
            // Otherwise, the slot was already used - free obj and ERROR
            Jim_FreeNewObj(interp, value);
            goto err;
        }
    }
    Jim_DecrRefCount(interp, emptyStr);
    return resultList;
eof:
    Jim_DecrRefCount(interp, emptyStr);
    Jim_FreeNewObj(interp, resultList);
    return (Jim_Obj *)EOF;
err:
    Jim_DecrRefCount(interp, emptyStr);
    Jim_FreeNewObj(interp, resultList);
    return 0;
}

#pragma endregion

// -----------------------------------------------------------------------------
// Pseudo Random Number Generation
// -----------------------------------------------------------------------------
#pragma region Pseudo Random Number Generation

// Initialize the sbox with the numbers from 0 to 255
static __device__ void JimPrngInit(Jim_Interp *interp)
{
#define PRNG_SEED_SIZE 256
    time_t t = time(NULL);
    interp->prngState = (Jim_PrngState *)Jim_Alloc(sizeof(Jim_PrngState));
    unsigned int *seed = (unsigned int *)Jim_Alloc(PRNG_SEED_SIZE * sizeof(*seed));
    for (int i = 0; i < PRNG_SEED_SIZE; i++) seed[i] = (unsigned int)(rand() ^ t ^ clock());
    JimPrngSeed(interp, (unsigned char *)seed, PRNG_SEED_SIZE * sizeof(*seed));
    Jim_Free(seed);
}

// Generates N bytes of random data
static __device__ void JimRandomBytes(Jim_Interp *interp, void *dest, unsigned int len)
{
    unsigned char *destByte = (unsigned char *)dest;
    unsigned int si, sj, x;
    // initialization, only needed the first time
    if (interp->prngState == NULL) JimPrngInit(interp);
    Jim_PrngState *prng = interp->prngState;
    // generates 'len' bytes of pseudo-random numbers
    for (x = 0; x < len; x++) {
        prng->i = (prng->i + 1) & 0xff;
        si = prng->sbox[prng->i];
        prng->j = (prng->j + si) & 0xff;
        sj = prng->sbox[prng->j];
        prng->sbox[prng->i] = sj;
        prng->sbox[prng->j] = si;
        *destByte++ = prng->sbox[(si + sj) & 0xff];
    }
}
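// Editorial note: this is the RC4 (ARCFOUR) generator: JimPrngSeed() below is the
// key-scheduling permutation of sbox[], and JimRandomBytes() above swaps two sbox
// entries per step and emits sbox[(si + sj) & 0xff] as the output byte.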
// Re-seed the generator with user-provided bytes
static __device__ void JimPrngSeed(Jim_Interp *interp, unsigned char *seed, int seedLen)
{
    int i;
    // initialization, only needed the first time
    if (interp->prngState == NULL) JimPrngInit(interp);
    Jim_PrngState *prng = interp->prngState;
    // Set the sbox[i] with i
    for (i = 0; i < 256; i++) prng->sbox[i] = i;
    // Now use the seed to perform a random permutation of the sbox
    for (i = 0; i < seedLen; i++) {
        unsigned char t = prng->sbox[i & 0xFF];
        prng->sbox[i & 0xFF] = prng->sbox[seed[i]];
        prng->sbox[seed[i]] = t;
    }
    prng->i = prng->j = 0;
    // discard at least the first 256 bytes of stream; borrow the seed buffer for this
    for (i = 0; i < 256; i += seedLen) JimRandomBytes(interp, seed, seedLen);
}

// [incr]
static __device__ int Jim_IncrCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    jim_wide wideValue, increment = 1;
    if (argc != 2 && argc != 3) {
        Jim_WrongNumArgs(interp, 1, argv, "varName ?increment?");
        return JIM_ERROR;
    }
    if (argc == 3)
        if (Jim_GetWide(interp, argv[2], &increment) != JIM_OK) return JIM_ERROR;
    Jim_Obj *intObjPtr = Jim_GetVariable(interp, argv[1], JIM_UNSHARED);
    // Set missing variable to 0
    if (!intObjPtr) wideValue = 0;
    else if (Jim_GetWide(interp, intObjPtr, &wideValue) != JIM_OK) return JIM_ERROR;
    if (!intObjPtr || Jim_IsShared(intObjPtr)) {
        intObjPtr = Jim_NewIntObj(interp, wideValue + increment);
        if (Jim_SetVariable(interp, argv[1], intObjPtr) != JIM_OK) {
            Jim_FreeNewObj(interp, intObjPtr);
            return JIM_ERROR;
        }
    }
    else {
        // Can do it the quick way
        Jim_InvalidateStringRep(intObjPtr);
        JimWideValue(intObjPtr) = wideValue + increment;
        // The following step is required in order to invalidate the string repr of "FOO" if the var name is of the form "FOO(IDX)"
        if (argv[1]->typePtr != &_variableObjType)
            Jim_SetVariable(interp, argv[1], intObjPtr); // Note that this can't fail since GetVariable already succeeded
    }
    Jim_SetResult(interp, intObjPtr);
    return JIM_OK;
}

#pragma endregion

// -----------------------------------------------------------------------------
// Eval
// -----------------------------------------------------------------------------
#pragma region Eval

#define JIM_EVAL_SARGV_LEN 8 // static arguments vector length
#define JIM_EVAL_SINTV_LEN 8 // static interpolation vector length

// Handle calls to the [unknown] command
static __device__ int JimUnknown(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    // If JimUnknown() is recursively called too many times... done here
    if (interp->unknown_called > 50) return JIM_ERROR;
    // The object interp->unknown just contains the "unknown" string; it is used to avoid looking up the unknown command every time and instead cache the result.
    // If the [unknown] command does not exist ...
    if (Jim_GetCommand(interp, interp->unknown, JIM_NONE) == NULL) return JIM_ERROR;
    interp->unknown_called++;
    // XXX: Are we losing fileNameObj and linenr?
    int retcode = Jim_EvalObjPrefix(interp, interp->unknown, argc, argv);
    interp->unknown_called--;
    return retcode;
}
static __device__ int JimInvokeCommand(Jim_Interp *interp, int objc, Jim_Obj *const *objv)
{
#if 0
    printf("invoke");
    for (int j = 0; j < objc; j++) printf(" '%s'", Jim_String(objv[j]));
    printf("\n");
#endif
    int retcode;
    Jim_Cmd *cmdPtr;
    if (interp->framePtr->tailcallCmd) {
        // Special tailcall command was pre-resolved
        cmdPtr = interp->framePtr->tailcallCmd;
        interp->framePtr->tailcallCmd = NULL;
    }
    else {
        cmdPtr = Jim_GetCommand(interp, objv[0], JIM_ERRMSG);
        if (cmdPtr == NULL) return JimUnknown(interp, objc, objv);
        JimIncrCmdRefCount(cmdPtr);
    }
    if (interp->evalDepth == interp->maxEvalDepth) {
        Jim_SetResultString(interp, "Infinite eval recursion", -1);
        retcode = JIM_ERROR;
        goto out;
    }
    interp->evalDepth++;
    // Call it -- Make sure result is an empty object.
    Jim_ResetResult(interp);
    if (cmdPtr->isproc) retcode = JimCallProcedure(interp, cmdPtr, objc, objv);
    else {
        ClientData clientData = interp->cmdPrivData = cmdPtr->u.native.privData;
        retcode = cmdPtr->u.native.cmdProc(clientData, interp, objc, objv);
    }
    interp->evalDepth--;
out:
    JimDecrCmdRefCount(interp, cmdPtr);
    return retcode;
}

// Eval the object vector 'objv' composed of 'objc' elements. Every element is used as a single argument.
// Jim_EvalObj() will call this function every time its object argument is of "list" type, with no string representation.
//
// This is possible because the string representation of a list object generated by the UpdateStringOfList is made
// in a way that ensures that every list element is a different command argument.
__device__ int Jim_EvalObjVector(Jim_Interp *interp, int objc, Jim_Obj *const *objv)
{
    // Incr refcount of arguments
    int i;
    for (i = 0; i < objc; i++) Jim_IncrRefCount(objv[i]);
    int retcode = JimInvokeCommand(interp, objc, objv);
    // Decr refcount of arguments and return the retcode
    for (i = 0; i < objc; i++) Jim_DecrRefCount(interp, objv[i]);
    return retcode;
}

// Invokes 'prefix' as a command with the objv array as arguments.
__device__ int Jim_EvalObjPrefix(Jim_Interp *interp, Jim_Obj *prefix, int objc, Jim_Obj *const *objv)
{
    Jim_Obj **nargv = (Jim_Obj **)Jim_Alloc((objc + 1) * sizeof(*nargv));
    nargv[0] = prefix;
    memcpy(&nargv[1], &objv[0], sizeof(nargv[0]) * objc);
    int ret = Jim_EvalObjVector(interp, objc + 1, nargv);
    Jim_Free(nargv);
    return ret;
}
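// Usage sketch (editorial; illustrative only and kept out of the build, so the
// hypothetical ExampleInvoke() is not part of the original file):
#if 0
static __device__ int ExampleInvoke(Jim_Interp *interp)
{
    // Equivalent of the script: set greeting hello
    Jim_Obj *objv[3];
    objv[0] = Jim_NewStringObj(interp, "set", -1);
    objv[1] = Jim_NewStringObj(interp, "greeting", -1);
    objv[2] = Jim_NewStringObj(interp, "hello", -1);
    // Jim_EvalObjVector() takes (and releases) its own references on the arguments
    return Jim_EvalObjVector(interp, 3, objv);
}
#endif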
static __device__ void JimAddErrorToStack(Jim_Interp *interp, ScriptObj *script)
{
    if (!interp->errorFlag) {
        // This is the first error, so save the file/line information and reset the stack
        interp->errorFlag = 1;
        Jim_IncrRefCount(script->fileNameObj);
        Jim_DecrRefCount(interp, interp->errorFileNameObj);
        interp->errorFileNameObj = script->fileNameObj;
        interp->errorLine = script->linenr;
        JimResetStackTrace(interp);
        // Always add a level where the error first occurs
        interp->addStackTrace++;
    }
    // Now if this is an "interesting" level, add it to the stack trace
    if (interp->addStackTrace > 0) {
        // Add the stack info for the current level
        JimAppendStackTrace(interp, Jim_String(interp->errorProc), script->fileNameObj, script->linenr);
        // Note: if we didn't have a filename for this level, don't clear the addStackTrace flag so we can pick it up at the next level
        if (Jim_Length(script->fileNameObj)) interp->addStackTrace = 0;
        Jim_DecrRefCount(interp, interp->errorProc);
        interp->errorProc = interp->emptyObj;
        Jim_IncrRefCount(interp->errorProc);
    }
}

static __device__ int JimSubstOneToken(Jim_Interp *interp, const ScriptToken *token, Jim_Obj **objPtrPtr)
{
    Jim_Obj *objPtr;
    switch (token->type) {
    case JIM_TT_STR:
    case JIM_TT_ESC: objPtr = token->objPtr; break;
    case JIM_TT_VAR: objPtr = Jim_GetVariable(interp, token->objPtr, JIM_ERRMSG); break;
    case JIM_TT_DICTSUGAR: objPtr = JimExpandDictSugar(interp, token->objPtr); break;
    case JIM_TT_EXPRSUGAR: objPtr = JimExpandExprSugar(interp, token->objPtr); break;
    case JIM_TT_CMD:
        switch (Jim_EvalObj(interp, token->objPtr)) {
        case JIM_OK:
        case JIM_RETURN: objPtr = interp->result; break;
        case JIM_BREAK: return JIM_BREAK; // Stop substituting
        case JIM_CONTINUE: return JIM_CONTINUE; // just skip this one
        default: return JIM_ERROR;
        }
        break;
    default:
        JimPanic(1, "default token type (%d) reached " "in Jim_SubstObj().", token->type);
        objPtr = NULL;
        break;
    }
    if (objPtr) {
        *objPtrPtr = objPtr;
        return JIM_OK;
    }
    return JIM_ERROR;
}

// Interpolate the given tokens into a unique Jim_Obj returned by reference via *objPtrPtr. This function is only called by Jim_EvalObj() and Jim_SubstObj(). The returned object has refcount = 0.
static __device__ Jim_Obj *JimInterpolateTokens(Jim_Interp *interp, const ScriptToken * token, int tokens, int flags)
{
    int totlen = 0, i;
    Jim_Obj *sintv[JIM_EVAL_SINTV_LEN];
    Jim_Obj **intv = (tokens <= JIM_EVAL_SINTV_LEN ? sintv : (Jim_Obj **)Jim_Alloc(sizeof(Jim_Obj *) * tokens));
    // Compute every token forming the argument in the intv objects vector.
    for (i = 0; i < tokens; i++) {
        switch (JimSubstOneToken(interp, &token[i], &intv[i])) {
        case JIM_OK:
        case JIM_RETURN: break;
        case JIM_BREAK:
            if (flags & JIM_SUBST_FLAG) {
                // Stop here
                tokens = i;
                continue;
            }
            // XXX: Should probably set an error about break outside loop
            // fall through to error
        case JIM_CONTINUE:
            if (flags & JIM_SUBST_FLAG) {
                intv[i] = NULL;
                continue;
            }
            // XXX: Ditto continue outside loop
            // fall through to error
        default:
            while (i--) Jim_DecrRefCount(interp, intv[i]);
            if (intv != sintv) Jim_Free(intv);
            return NULL;
        }
        Jim_IncrRefCount(intv[i]);
        Jim_String(intv[i]);
        totlen += intv[i]->length;
    }
    // Fast path return for a single token
    if (tokens == 1 && intv[0] && intv == sintv) {
        Jim_DecrRefCount(interp, intv[0]);
        return intv[0];
    }
    // Concatenate every token into a unique object.
    Jim_Obj *objPtr = Jim_NewStringObjNoAlloc(interp, NULL, 0);
    if (tokens == 4 && token[0].type == JIM_TT_ESC && token[1].type == JIM_TT_ESC && token[2].type == JIM_TT_VAR) {
        // May be able to do fast interpolated object -> dictSubst
        objPtr->typePtr = &_interpolatedObjType;
        objPtr->internalRep.dictSubstValue.varNameObjPtr = token[0].objPtr;
        objPtr->internalRep.dictSubstValue.indexObjPtr = intv[2];
        Jim_IncrRefCount(intv[2]);
    }
    // The first interpolated token is source, so preserve the source info
    else if (tokens && intv[0] && intv[0]->typePtr == &_sourceObjType)
        JimSetSourceInfo(interp, objPtr, intv[0]->internalRep.sourceValue.fileNameObj, intv[0]->internalRep.sourceValue.lineNumber);
    char *s = objPtr->bytes = (char *)Jim_Alloc(totlen + 1);
    objPtr->length = totlen;
    for (i = 0; i < tokens; i++) {
        if (intv[i]) {
            memcpy(s, intv[i]->bytes, intv[i]->length);
            s += intv[i]->length;
            Jim_DecrRefCount(interp, intv[i]);
        }
    }
    objPtr->bytes[totlen] = '\0';
    // Free the intv vector if not static.
    if (intv != sintv) Jim_Free(intv);
    return objPtr;
}
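// Example (editorial): for a word written as abc$x[cmd] the parser produced the
// tokens ESC "abc", VAR "x" and CMD "cmd"; JimInterpolateTokens() above
// substitutes each one and concatenates the three results into a single object.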
// listPtr *must* be a list. The contents of the list are evaluated with the first element as the command and the remaining elements as the arguments.
static __device__ int JimEvalObjList(Jim_Interp *interp, Jim_Obj *listPtr)
{
    int retcode = JIM_OK;
    JimPanic(Jim_IsList(listPtr) == 0, "JimEvalObjList() invoked on non-list.");
    if (listPtr->internalRep.listValue.len) {
        Jim_IncrRefCount(listPtr);
        retcode = JimInvokeCommand(interp, listPtr->internalRep.listValue.len, listPtr->internalRep.listValue.ele);
        Jim_DecrRefCount(interp, listPtr);
    }
    return retcode;
}

__device__ int Jim_EvalObjList(Jim_Interp *interp, Jim_Obj *listPtr)
{
    SetListFromAny(interp, listPtr);
    return JimEvalObjList(interp, listPtr);
}

__device__ int Jim_EvalObj(Jim_Interp *interp, Jim_Obj *scriptObjPtr)
{
    int i;
    ScriptToken *token;
    int retcode = JIM_OK;
    Jim_Obj *sargv[JIM_EVAL_SARGV_LEN], **argv = NULL;
    Jim_Obj *prevScriptObj;
    // If the object is of type "list", with no string rep we can call a specialized version of Jim_EvalObj()
    if (Jim_IsList(scriptObjPtr) && scriptObjPtr->bytes == NULL) return JimEvalObjList(interp, scriptObjPtr);
    Jim_IncrRefCount(scriptObjPtr); // Make sure it's shared
    ScriptObj *script = JimGetScript(interp, scriptObjPtr);
    if (!JimScriptValid(interp, script)) {
        Jim_DecrRefCount(interp, scriptObjPtr);
        return JIM_ERROR;
    }
    // Reset the interpreter result. This is useful to return the empty result in the case of an empty program.
    Jim_ResetResult(interp);
    token = script->token;
#ifdef JIM_OPTIMIZATION
    // Check for one of the following common scripts used by for, while:
    //   {}
    //   incr a
    if (script->len == 0) {
        Jim_DecrRefCount(interp, scriptObjPtr);
        return JIM_OK;
    }
    if (script->len == 3 && token[1].objPtr->typePtr == &_commandObjType
        && token[1].objPtr->internalRep.cmdValue.cmdPtr->isproc == 0
        && token[1].objPtr->internalRep.cmdValue.cmdPtr->u.native.cmdProc == Jim_IncrCoreCommand
        && token[2].objPtr->typePtr == &_variableObjType) {
        Jim_Obj *objPtr = Jim_GetVariable(interp, token[2].objPtr, JIM_NONE);
        if (objPtr && !Jim_IsShared(objPtr) && objPtr->typePtr == &_intObjType) {
            JimWideValue(objPtr)++;
            Jim_InvalidateStringRep(objPtr);
            Jim_DecrRefCount(interp, scriptObjPtr);
            Jim_SetResult(interp, objPtr);
            return JIM_OK;
        }
    }
#endif
    // Now we have to make sure the internal repr will not be freed on shimmering.
    // Think for example of this:
    //   set x {llength $x; ... some more code ...}; eval $x
    // In order to preserve the internal rep, we increment the inUse field of the script internal rep structure.
    script->inUse++;
    // Stash the current script
    prevScriptObj = interp->currentScriptObj;
    interp->currentScriptObj = scriptObjPtr;
    interp->errorFlag = 0;
    argv = sargv;
    // Execute every command sequentially until the end of the script or an error occurs.
    for (i = 0; i < script->len && retcode == JIM_OK; ) {
        // First token of the line is always JIM_TT_LINE
        int argc = token[i].objPtr->internalRep.scriptLineValue.argc;
        script->linenr = token[i].objPtr->internalRep.scriptLineValue.line;
        // Allocate the arguments vector if required
        if (argc > JIM_EVAL_SARGV_LEN) argv = (Jim_Obj **)Jim_Alloc(sizeof(Jim_Obj *) * argc);
        // Skip the JIM_TT_LINE token
        i++;
        // Populate the argument objects. If an error occurs, retcode will be set and 'j' will be set to the number of args expanded
        int j;
        for (j = 0; j < argc; j++) {
            long wordtokens = 1;
            int expand = 0;
            Jim_Obj *wordObjPtr = NULL;
            if (token[i].type == JIM_TT_WORD) {
                wordtokens = (long)JimWideValue(token[i++].objPtr);
                if (wordtokens < 0) {
                    expand = 1;
                    wordtokens = -wordtokens;
                }
            }
            // Fast path if the token does not need interpolation
            if (wordtokens == 1)
                switch (token[i].type) {
                case JIM_TT_ESC:
                case JIM_TT_STR: wordObjPtr = token[i].objPtr; break;
                case JIM_TT_VAR: wordObjPtr = Jim_GetVariable(interp, token[i].objPtr, JIM_ERRMSG); break;
                case JIM_TT_EXPRSUGAR: wordObjPtr = JimExpandExprSugar(interp, token[i].objPtr); break;
                case JIM_TT_DICTSUGAR: wordObjPtr = JimExpandDictSugar(interp, token[i].objPtr); break;
                case JIM_TT_CMD:
                    retcode = Jim_EvalObj(interp, token[i].objPtr);
                    if (retcode == JIM_OK) wordObjPtr = Jim_GetResult(interp);
                    break;
                default: JimPanic(1, "default token type reached " "in Jim_EvalObj().");
                }
            // For interpolation we call a helper function to do the work for us.
            else wordObjPtr = JimInterpolateTokens(interp, token + i, wordtokens, JIM_NONE);
            if (!wordObjPtr) {
                if (retcode == JIM_OK) retcode = JIM_ERROR;
                break;
            }
            Jim_IncrRefCount(wordObjPtr);
            i += wordtokens;
            if (!expand) argv[j] = wordObjPtr;
            else {
                // Need to expand wordObjPtr into multiple args from argv[j] ...
                int len = Jim_ListLength(interp, wordObjPtr);
                int newargc = argc + len - 1;
                int k;
                if (len > 1) {
                    if (argv == sargv) {
                        if (newargc > JIM_EVAL_SARGV_LEN) {
                            argv = (Jim_Obj **)Jim_Alloc(sizeof(*argv) * newargc);
                            memcpy(argv, sargv, sizeof(*argv) * j);
                        }
                    }
                    // Need to realloc to make room for (len - 1) more entries
                    else argv = (Jim_Obj **)Jim_Realloc(argv, sizeof(*argv) * newargc);
                }
                // Now copy in the expanded version
                for (k = 0; k < len; k++) {
                    argv[j++] = wordObjPtr->internalRep.listValue.ele[k];
                    Jim_IncrRefCount(wordObjPtr->internalRep.listValue.ele[k]);
                }
                // The original object reference is no longer needed: after the expansion it is no longer present on the argument vector, but the single elements are in its place.
                Jim_DecrRefCount(interp, wordObjPtr);
                // And update the indexes
                j--;
                argc += len - 1;
            }
        }
        if (retcode == JIM_OK && argc) {
            // Invoke the command
            retcode = JimInvokeCommand(interp, argc, argv);
            // Check for a signal after each command
            if (Jim_CheckSignal(interp)) retcode = JIM_SIGNAL;
        }
        // Finished with the command, so decrement ref counts of each argument
        while (j-- > 0) Jim_DecrRefCount(interp, argv[j]);
        if (argv != sargv) {
            Jim_Free(argv);
            argv = sargv;
        }
    }
    // Possibly add to the error stack trace
    if (retcode == JIM_ERROR) JimAddErrorToStack(interp, script);
    // Propagate the addStackTrace value through 'return -code error'
    // No need to add a stack trace
    else if (retcode != JIM_RETURN || interp->returnCode != JIM_ERROR) interp->addStackTrace = 0;
    // Restore the current script
    interp->currentScriptObj = prevScriptObj;
    // Note that we don't have to decrement inUse, because the following code transfers our use of the reference again to the script object.
    Jim_FreeIntRep(interp, scriptObjPtr);
    scriptObjPtr->typePtr = &_scriptObjType;
    Jim_SetIntRepPtr(scriptObjPtr, script);
    Jim_DecrRefCount(interp, scriptObjPtr);
    return retcode;
}

static __device__ int JimSetProcArg(Jim_Interp *interp, Jim_Obj *argNameObj, Jim_Obj *argValObj)
{
    int retcode;
    // If argNameObj begins with '&', do an automatic upvar
    const char *varname = Jim_String(argNameObj);
    if (*varname == '&') {
        // First check that the target variable exists
        Jim_Obj *objPtr;
        Jim_CallFrame *savedCallFrame = interp->framePtr;
        interp->framePtr = interp->framePtr->parent;
        objPtr = Jim_GetVariable(interp, argValObj, JIM_ERRMSG);
        interp->framePtr = savedCallFrame;
        if (!objPtr)
            return JIM_ERROR;
        // It exists, so perform the binding.
        objPtr = Jim_NewStringObj(interp, varname + 1, -1);
        Jim_IncrRefCount(objPtr);
        retcode = Jim_SetVariableLink(interp, objPtr, argValObj, interp->framePtr->parent);
        Jim_DecrRefCount(interp, objPtr);
    }
    else
        retcode = Jim_SetVariable(interp, argNameObj, argValObj);
    return retcode;
}

// Sets the interp result to be an error message indicating the required proc args.
static __device__ void JimSetProcWrongArgs(Jim_Interp *interp, Jim_Obj *procNameObj, Jim_Cmd *cmd)
{
    // Create a nice error message, consistent with Tcl 8.5
    Jim_Obj *argmsg = Jim_NewStringObj(interp, "", 0);
    for (int i = 0; i < cmd->u.proc.argListLen; i++) {
        Jim_AppendString(interp, argmsg, " ", 1);
        if (i == cmd->u.proc.argsPos) {
            // Renamed args
            if (cmd->u.proc.arglist[i].defaultObjPtr) {
                Jim_AppendString(interp, argmsg, "?", 1);
                Jim_AppendObj(interp, argmsg, cmd->u.proc.arglist[i].defaultObjPtr);
                Jim_AppendString(interp, argmsg, " ...?", -1);
            }
            // We have plain args
            else
                Jim_AppendString(interp, argmsg, "?arg...?", -1);
        }
        else {
            if (cmd->u.proc.arglist[i].defaultObjPtr) {
                Jim_AppendString(interp, argmsg, "?", 1);
                Jim_AppendObj(interp, argmsg, cmd->u.proc.arglist[i].nameObjPtr);
                Jim_AppendString(interp, argmsg, "?", 1);
            }
            else {
                const char *arg = Jim_String(cmd->u.proc.arglist[i].nameObjPtr);
                if (*arg == '&')
                    arg++;
                Jim_AppendString(interp, argmsg, arg, -1);
            }
        }
    }
    Jim_SetResultFormatted(interp, "wrong # args: should be \"%#s%#s\"", procNameObj, argmsg);
    Jim_FreeNewObj(interp, argmsg);
}

#ifdef jim_ext_namespace
// [namespace eval]
__device__ int Jim_EvalNamespace(Jim_Interp *interp, Jim_Obj *scriptObj, Jim_Obj *nsObj)
{
    // Create a new callframe
    Jim_CallFrame *callFramePtr = JimCreateCallFrame(interp, interp->framePtr, nsObj);
    callFramePtr->argv = &interp->emptyObj;
    callFramePtr->argc = 0;
    callFramePtr->procArgsObjPtr = NULL;
    callFramePtr->procBodyObjPtr = scriptObj;
    callFramePtr->staticVars = NULL;
    callFramePtr->fileNameObj = interp->emptyObj;
    callFramePtr->line = 0;
    Jim_IncrRefCount(scriptObj);
    interp->framePtr = callFramePtr;
    // Check if there are too many nested calls
    int retcode;
    if (interp->framePtr->level == interp->maxCallFrameDepth) {
        Jim_SetResultString(interp, "Too many nested calls. Infinite recursion?", -1);
        retcode = JIM_ERROR;
    }
    // Eval the body
    else
        retcode = Jim_EvalObj(interp, scriptObj);
    // Destroy the callframe
    interp->framePtr = interp->framePtr->parent;
    JimFreeCallFrame(interp, callFramePtr, JIM_FCF_REUSE);
    return retcode;
}
#endif

// Call a procedure implemented in Tcl. There is room to speed this function up considerably:
// currently the callframes are not cached, but allocated and destroyed every time, and
// creating/destroying the local vars hash table every time is especially costly.
// This can be fixed by implementing callframe caching in JimCreateCallFrame() and JimFreeCallFrame().
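// As an illustration of the binding rules implemented below (a script-level sketch, not code
// from this file): given
//   proc p {a {b 2} args} { ... }
// the call [p 1] binds a=1, b=2 (the default) and args={}, while [p 1 7 8 9] binds a=1, b=7
// and args={8 9}; a bare [p] fails the arity check with
//   wrong # args: should be "p a ?b? ?arg...?"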
static __device__ int JimCallProcedure(Jim_Interp *interp, Jim_Cmd *cmd, int argc, Jim_Obj *const *argv)
{
    // Check arity
    if (argc - 1 < cmd->u.proc.reqArity || (cmd->u.proc.argsPos < 0 && argc - 1 > cmd->u.proc.reqArity + cmd->u.proc.optArity)) {
        JimSetProcWrongArgs(interp, argv[0], cmd);
        return JIM_ERROR;
    }
    // Optimise for a procedure with no body - useful for optional debugging
    if (Jim_Length(cmd->u.proc.bodyObjPtr) == 0)
        return JIM_OK;
    // Check if there are too many nested calls
    if (interp->framePtr->level == interp->maxCallFrameDepth) {
        Jim_SetResultString(interp, "Too many nested calls. Infinite recursion?", -1);
        return JIM_ERROR;
    }
    // Create a new callframe
    Jim_CallFrame *callFramePtr = JimCreateCallFrame(interp, interp->framePtr, cmd->u.proc.nsObj);
    callFramePtr->argv = argv;
    callFramePtr->argc = argc;
    callFramePtr->procArgsObjPtr = cmd->u.proc.argListObjPtr;
    callFramePtr->procBodyObjPtr = cmd->u.proc.bodyObjPtr;
    callFramePtr->staticVars = cmd->u.proc.staticVars;
    // Remember where we were called from
    ScriptObj *script = JimGetScript(interp, interp->currentScriptObj);
    callFramePtr->fileNameObj = script->fileNameObj;
    callFramePtr->line = script->linenr;
    Jim_IncrRefCount(cmd->u.proc.argListObjPtr);
    Jim_IncrRefCount(cmd->u.proc.bodyObjPtr);
    interp->framePtr = callFramePtr;
    // How many optional args are available
    int optargs = (argc - 1 - cmd->u.proc.reqArity);
    int retcode;
    // Step 'i' along the actual args, and step 'd' along the formal args
    int i = 1;
    for (int d = 0; d < cmd->u.proc.argListLen; d++) {
        Jim_Obj *nameObjPtr = cmd->u.proc.arglist[d].nameObjPtr;
        if (d == cmd->u.proc.argsPos) {
            // Assign $args
            Jim_Obj *listObjPtr;
            int argsLen = 0;
            if (cmd->u.proc.reqArity + cmd->u.proc.optArity < argc - 1)
                argsLen = argc - 1 - (cmd->u.proc.reqArity + cmd->u.proc.optArity);
            listObjPtr = Jim_NewListObj(interp, &argv[i], argsLen);
            // It is possible to rename args.
            if (cmd->u.proc.arglist[d].defaultObjPtr)
                nameObjPtr = cmd->u.proc.arglist[d].defaultObjPtr;
            retcode = Jim_SetVariable(interp, nameObjPtr, listObjPtr);
            if (retcode != JIM_OK)
                goto badargset;
            i += argsLen;
            continue;
        }
        // Optional or required?
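        // ('optargs' counts how many optional parameters can still be satisfied from the actual
        // arguments; the post-decrement in the condition below consumes one such slot per
        // defaulted parameter, and once it is exhausted the declared default is used instead.)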
        if (cmd->u.proc.arglist[d].defaultObjPtr == NULL || optargs-- > 0)
            retcode = JimSetProcArg(interp, nameObjPtr, argv[i++]);
        // Ran out, so use the default
        else
            retcode = Jim_SetVariable(interp, nameObjPtr, cmd->u.proc.arglist[d].defaultObjPtr);
        if (retcode != JIM_OK)
            goto badargset;
    }
    // Eval the body
    retcode = Jim_EvalObj(interp, cmd->u.proc.bodyObjPtr);
badargset:
    // Free the callframe
    interp->framePtr = interp->framePtr->parent;
    JimFreeCallFrame(interp, callFramePtr, JIM_FCF_REUSE);
    // Now chain any tailcalls in the parent frame
    if (interp->framePtr->tailcallObj) {
        do {
            Jim_Obj *tailcallObj = interp->framePtr->tailcallObj;
            interp->framePtr->tailcallObj = NULL;
            if (retcode == JIM_EVAL) {
                retcode = Jim_EvalObjList(interp, tailcallObj);
                // If the result of the tailcall is 'return', push it up to the caller
                if (retcode == JIM_RETURN)
                    interp->returnLevel++;
            }
            Jim_DecrRefCount(interp, tailcallObj);
        } while (interp->framePtr->tailcallObj);
        // If the tailcall chain finished early, may need to manually discard the command
        if (interp->framePtr->tailcallCmd) {
            JimDecrCmdRefCount(interp, interp->framePtr->tailcallCmd);
            interp->framePtr->tailcallCmd = NULL;
        }
    }
    // Handle the JIM_RETURN return code
    if (retcode == JIM_RETURN) {
        if (--interp->returnLevel <= 0) {
            retcode = interp->returnCode;
            interp->returnCode = JIM_OK;
            interp->returnLevel = 0;
        }
    }
    else if (retcode == JIM_ERROR) {
        interp->addStackTrace++;
        Jim_DecrRefCount(interp, interp->errorProc);
        interp->errorProc = argv[0];
        Jim_IncrRefCount(interp->errorProc);
    }
    return retcode;
}

__device__ int Jim_EvalSource(Jim_Interp *interp, const char *filename, int lineno, const char *script)
{
    int retval;
    Jim_Obj *scriptObjPtr = Jim_NewStringObj(interp, script, -1);
    Jim_IncrRefCount(scriptObjPtr);
    if (filename) {
        Jim_Obj *prevScriptObj;
        JimSetSourceInfo(interp, scriptObjPtr, Jim_NewStringObj(interp, filename, -1), lineno);
        prevScriptObj = interp->currentScriptObj;
        interp->currentScriptObj = scriptObjPtr;
        retval = Jim_EvalObj(interp, scriptObjPtr);
        interp->currentScriptObj = prevScriptObj;
    }
    else
        retval = Jim_EvalObj(interp, scriptObjPtr);
    Jim_DecrRefCount(interp, scriptObjPtr);
    return retval;
}

__device__ int Jim_Eval(Jim_Interp *interp, const char *script)
{
    return Jim_EvalObj(interp, Jim_NewStringObj(interp, script, -1));
}

// Execute script in the scope of the global level
__device__ int Jim_EvalGlobal(Jim_Interp *interp, const char *script)
{
    Jim_CallFrame *savedFramePtr = interp->framePtr;
    interp->framePtr = interp->topFramePtr;
    int retval = Jim_Eval(interp, script);
    interp->framePtr = savedFramePtr;
    return retval;
}

__device__ int Jim_EvalFileGlobal(Jim_Interp *interp, const char *filename)
{
    Jim_CallFrame *savedFramePtr = interp->framePtr;
    interp->framePtr = interp->topFramePtr;
    int retval = Jim_EvalFile(interp, filename);
    interp->framePtr = savedFramePtr;
    return retval;
}

#include <sys/statcu.h>

__device__ int Jim_EvalFile(Jim_Interp *interp, const char *filename)
{
    FILE *fp;
    struct stat sb;
    if (stat(filename, &sb) != 0 || (fp = fopen(filename, "rt")) == NULL) {
        Jim_SetResultFormatted(interp, "couldn't read file \"%s\": %s", filename, strerror(errno));
        return JIM_ERROR;
    }
    if (sb.st_size == 0) {
        fclose(fp);
        return JIM_OK;
    }
    char *buf = (char *)Jim_Alloc(sb.st_size + 1);
    int readlen = (int)fread(buf, 1, sb.st_size, fp);
    if (ferror(fp)) {
        fclose(fp);
        Jim_Free(buf);
        Jim_SetResultFormatted(interp, "failed to load file \"%s\": %s", filename, strerror(errno));
        return JIM_ERROR;
    }
    fclose(fp);
    buf[readlen] = 0;
    Jim_Obj *scriptObjPtr = Jim_NewStringObjNoAlloc(interp, buf, readlen);
    JimSetSourceInfo(interp, scriptObjPtr, Jim_NewStringObj(interp, filename, -1), 1);
    Jim_IncrRefCount(scriptObjPtr);
    Jim_Obj *prevScriptObj = interp->currentScriptObj;
    interp->currentScriptObj = scriptObjPtr;
    int retcode = Jim_EvalObj(interp, scriptObjPtr);
    // Handle the JIM_RETURN return code
    if (retcode == JIM_RETURN) {
        if (--interp->returnLevel <= 0) {
            retcode = interp->returnCode;
            interp->returnCode = JIM_OK;
            interp->returnLevel = 0;
        }
    }
    // EvalFile changes context, so add a stack frame here
    if (retcode == JIM_ERROR)
        interp->addStackTrace++;
    interp->currentScriptObj = prevScriptObj;
    Jim_DecrRefCount(interp, scriptObjPtr);
    return retcode;
}

#pragma endregion

// -----------------------------------------------------------------------------
// Subst
// -----------------------------------------------------------------------------
#pragma region Subst

static __device__ void JimParseSubst(struct JimParserCtx *pc, int flags)
{
    pc->tstart = pc->p;
    pc->tline = pc->linenr;
    if (pc->len == 0) {
        pc->tend = pc->p;
        pc->tt = JIM_TT_EOL;
        pc->eof = 1;
        return;
    }
    if (*pc->p == '[' && !(flags & JIM_SUBST_NOCMD)) {
        JimParseCmd(pc);
        return;
    }
    if (*pc->p == '$' && !(flags & JIM_SUBST_NOVAR)) {
        if (JimParseVar(pc) == JIM_OK)
            return;
        // Not a var, so treat as a string
        pc->tstart = pc->p;
        flags |= JIM_SUBST_NOVAR;
    }
    while (pc->len) {
        if (*pc->p == '$' && !(flags & JIM_SUBST_NOVAR))
            break;
        if (*pc->p == '[' && !(flags & JIM_SUBST_NOCMD))
            break;
        if (*pc->p == '\\' && pc->len > 1) {
            pc->p++; pc->len--;
        }
        pc->p++; pc->len--;
    }
    pc->tend = pc->p - 1;
    pc->tt = (flags & JIM_SUBST_NOESC) ? JIM_TT_STR : JIM_TT_ESC;
}

// The subst object type reuses most of the data structures and functions of the script object.
// Script's data structures are a bit more complex than [subst]itution requires, but the reuse
// lets us deal with a single data structure at the cost of some more memory usage for substitutions.
// This method takes the string representation of an object as a Tcl string on which to perform
// [subst]itution, and generates the pre-parsed internal representation.
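// For example (a script-level sketch): with x set to 5,
//   subst {x is $x, plus one is [expr {$x + 1}]}
// yields "x is 5, plus one is 6", while the JIM_SUBST_NOVAR and JIM_SUBST_NOCMD flags leave
// the "$x" reference or the bracketed command untouched, respectively.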
static __device__ int SetSubstFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr, int flags)
{
    int scriptTextLen;
    const char *scriptText = Jim_GetString(objPtr, &scriptTextLen);
    // Initially parse the subst into tokens (in tokenlist)
    ParseTokenList tokenlist;
    ScriptTokenListInit(&tokenlist);
    struct JimParserCtx parser;
    JimParserInit(&parser, scriptText, scriptTextLen, 1);
    while (1) {
        JimParseSubst(&parser, flags);
        if (parser.eof)
            break; // Note that subst doesn't need the EOL token
        ScriptAddToken(&tokenlist, parser.tstart, (int)(parser.tend - parser.tstart + 1), parser.tt, parser.tline);
    }
    // Create the "real" subst/script tokens from the initial token list
    struct ScriptObj *script = (ScriptObj *)Jim_Alloc(sizeof(*script));
    script->inUse = 1;
    script->substFlags = flags;
    script->fileNameObj = interp->emptyObj;
    Jim_IncrRefCount(script->fileNameObj);
    SubstObjAddTokens(interp, script, &tokenlist);
    // No longer need the token list
    ScriptTokenListFree(&tokenlist);
#ifdef DEBUG_SHOW_SUBST
    {
        printf("==== Subst ====\n");
        for (int i = 0; i < script->len; i++)
            printf("[%2d] %s '%s'\n", i, jim_tt_name(script->token[i].type), Jim_String(script->token[i].objPtr));
    }
#endif
    // Free the old internal rep and set the new one
    Jim_FreeIntRep(interp, objPtr);
    Jim_SetIntRepPtr(objPtr, script);
    objPtr->typePtr = &_scriptObjType;
    return JIM_OK;
}

static __device__ ScriptObj *Jim_GetSubst(Jim_Interp *interp, Jim_Obj *objPtr, int flags)
{
    if (objPtr->typePtr != &_scriptObjType || ((ScriptObj *)Jim_GetIntRepPtr(objPtr))->substFlags != flags)
        SetSubstFromAny(interp, objPtr, flags);
    return (ScriptObj *)Jim_GetIntRepPtr(objPtr);
}

// Performs command, variable and backslash substitution, storing the result object (with refcount 0) into resObjPtrPtr.
__device__ int Jim_SubstObj(Jim_Interp *interp, Jim_Obj *substObjPtr, Jim_Obj **resObjPtrPtr, int flags)
{
    ScriptObj *script = Jim_GetSubst(interp, substObjPtr, flags);
    Jim_IncrRefCount(substObjPtr); // Make sure it's shared
    // In order to preserve the internal rep, we increment the inUse field of the script internal rep structure.
    script->inUse++;
    *resObjPtrPtr = JimInterpolateTokens(interp, script->token, script->len, flags);
    script->inUse--;
    Jim_DecrRefCount(interp, substObjPtr);
    return (*resObjPtrPtr == NULL ? JIM_ERROR : JIM_OK);
}

#pragma endregion

// -----------------------------------------------------------------------------
// Core commands utility functions
// -----------------------------------------------------------------------------
#pragma region Core commands utility functions

__device__ void Jim_WrongNumArgs(Jim_Interp *interp, int argc, Jim_Obj *const *argv, const char *msg)
{
    Jim_Obj *listObjPtr = Jim_NewListObj(interp, argv, argc);
    if (*msg)
        Jim_ListAppendElement(interp, listObjPtr, Jim_NewStringObj(interp, msg, -1));
    Jim_IncrRefCount(listObjPtr);
    Jim_Obj *objPtr = Jim_ListJoin(interp, listObjPtr, " ", 1);
    Jim_DecrRefCount(interp, listObjPtr);
    Jim_IncrRefCount(objPtr);
    Jim_SetResultFormatted(interp, "wrong # args: should be \"%#s\"", objPtr);
    Jim_DecrRefCount(interp, objPtr);
}

// May add the key and/or value to the list.
typedef void JimHashtableIteratorCallbackType(Jim_Interp *interp, Jim_Obj *listObjPtr, Jim_HashEntry *he, int type);

#define JimTrivialMatch(pattern) (strpbrk((pattern), "*[?\\") == NULL)

// For each key of the hash table 'ht' (with string keys) which matches the glob pattern (all if NULL),
// invoke the callback to add entries to a list. Returns the list.
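// (Note that JimTrivialMatch above treats a pattern containing none of the glob metacharacters
// *[?\ as a plain string, so e.g. a lookup for an exact command name becomes a single hash
// probe below instead of a full table scan.)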
static __device__ Jim_Obj *JimHashtablePatternMatch(Jim_Interp *interp, Jim_HashTable *ht, Jim_Obj *patternObjPtr, JimHashtableIteratorCallbackType *callback, int type)
{
    Jim_HashEntry *he;
    Jim_Obj *listObjPtr = Jim_NewListObj(interp, NULL, 0);
    // Check for the non-pattern case. We can do this much more efficiently.
    if (patternObjPtr && JimTrivialMatch(Jim_String(patternObjPtr))) {
        he = Jim_FindHashEntry(ht, Jim_String(patternObjPtr));
        if (he)
            callback(interp, listObjPtr, he, type);
    }
    else {
        Jim_HashTableIterator htiter;
        JimInitHashTableIterator(ht, &htiter);
        while ((he = Jim_NextHashEntry(&htiter)) != NULL)
            if (patternObjPtr == NULL || JimGlobMatch(Jim_String(patternObjPtr), (const char *)he->key, 0))
                callback(interp, listObjPtr, he, type);
    }
    return listObjPtr;
}

// Keep these in order
#define JIM_CMDLIST_COMMANDS 0
#define JIM_CMDLIST_PROCS 1
#define JIM_CMDLIST_CHANNELS 2

// Adds matching command names (procs, channels) to the list.
static __device__ void JimCommandMatch(Jim_Interp *interp, Jim_Obj *listObjPtr, Jim_HashEntry *he, int type)
{
    Jim_Cmd *cmdPtr = (Jim_Cmd *)Jim_GetHashEntryVal(he);
    // Not a proc
    if (type == JIM_CMDLIST_PROCS && !cmdPtr->isproc)
        return;
    Jim_Obj *objPtr = Jim_NewStringObj(interp, (const char *)he->key, -1);
    Jim_IncrRefCount(objPtr);
    if (type != JIM_CMDLIST_CHANNELS || Jim_AioFilehandle(interp, objPtr))
        Jim_ListAppendElement(interp, listObjPtr, objPtr);
    Jim_DecrRefCount(interp, objPtr);
}

// type is JIM_CMDLIST_xxx
static __device__ Jim_Obj *JimCommandsList(Jim_Interp *interp, Jim_Obj *patternObjPtr, int type)
{
    return JimHashtablePatternMatch(interp, &interp->commands, patternObjPtr, JimCommandMatch, type);
}

// Keep these in order
#define JIM_VARLISTGLOBAL_S 0
#define JIM_VARLIST_LOCALS 1
#define JIM_VARLIST_VARS 2
#define JIM_VARLIST_VALUES 0x1000

// Adds matching variable names to the list
static __device__ void JimVariablesMatch(Jim_Interp *interp, Jim_Obj *listObjPtr, Jim_HashEntry *he, int type)
{
    Jim_Var *varPtr = (Jim_Var *)Jim_GetHashEntryVal(he);
    if (type != JIM_VARLIST_LOCALS || varPtr->linkFramePtr == NULL) {
        Jim_ListAppendElement(interp, listObjPtr, Jim_NewStringObj(interp, (const char *)he->key, -1));
        if (type & JIM_VARLIST_VALUES)
            Jim_ListAppendElement(interp, listObjPtr, varPtr->objPtr);
    }
}

// mode is JIM_VARLIST_xxx
static __device__ Jim_Obj *JimVariablesList(Jim_Interp *interp, Jim_Obj *patternObjPtr, int mode)
{
    // For [info locals], if we are at top level an empty list is returned. I don't agree, but we aim at compatibility (SS)
    if (mode == JIM_VARLIST_LOCALS && interp->framePtr == interp->topFramePtr)
        return interp->emptyObj;
    else {
        Jim_CallFrame *framePtr = (mode == JIM_VARLISTGLOBAL_S ? interp->topFramePtr : interp->framePtr);
        return JimHashtablePatternMatch(interp, &framePtr->vars, patternObjPtr, JimVariablesMatch, mode);
    }
}

static __device__ int JimInfoLevel(Jim_Interp *interp, Jim_Obj *levelObjPtr, Jim_Obj **objPtrPtr, int info_level_cmd)
{
    Jim_CallFrame *targetCallFrame = JimGetCallFrameByInteger(interp, levelObjPtr);
    if (targetCallFrame == NULL)
        return JIM_ERROR;
    // No proc call at the toplevel callframe
    if (targetCallFrame == interp->topFramePtr) {
        Jim_SetResultFormatted(interp, "bad level \"%#s\"", levelObjPtr);
        return JIM_ERROR;
    }
    if (info_level_cmd)
        *objPtrPtr = Jim_NewListObj(interp, targetCallFrame->argv, targetCallFrame->argc);
    else {
        Jim_Obj *listObj = Jim_NewListObj(interp, NULL, 0);
        Jim_ListAppendElement(interp, listObj, targetCallFrame->argv[0]);
        Jim_ListAppendElement(interp, listObj, targetCallFrame->fileNameObj);
        Jim_ListAppendElement(interp, listObj, Jim_NewIntObj(interp, targetCallFrame->line));
        *objPtrPtr = listObj;
    }
    return JIM_OK;
}

#pragma endregion

// -----------------------------------------------------------------------------
// Core commands
// -----------------------------------------------------------------------------
#pragma region Core commands

// Fake [puts] -- not the real puts, just for debugging.
static __device__ int Jim_PutsCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    if (argc != 2 && argc != 3) {
        Jim_WrongNumArgs(interp, 1, argv, "?-nonewline? string");
        return JIM_ERROR;
    }
    if (argc == 3) {
        if (!Jim_CompareStringImmediate(interp, argv[1], "-nonewline")) {
            Jim_SetResultString(interp, "The second argument must be -nonewline", -1);
            return JIM_ERROR;
        }
        else
            fputs(Jim_String(argv[2]), stdout);
    }
    else
        puts(Jim_String(argv[1]));
    return JIM_OK;
}

// Helper for [+] and [*]
static __device__ int JimAddMulHelper(Jim_Interp *interp, int argc, Jim_Obj *const *argv, int op)
{
    int i;
    jim_wide res = (op == JIM_EXPROP_ADD ? 0 : 1);
    for (i = 1; i < argc; i++) {
        jim_wide wideValue;
        if (Jim_GetWide(interp, argv[i], &wideValue) != JIM_OK)
            goto trydouble;
        if (op == JIM_EXPROP_ADD)
            res += wideValue;
        else
            res *= wideValue;
    }
    Jim_SetResultInt(interp, res);
    return JIM_OK;
trydouble:
    double doubleRes = (double)res;
    for (; i < argc; i++) {
        double doubleValue;
        if (Jim_GetDouble(interp, argv[i], &doubleValue) != JIM_OK)
            return JIM_ERROR;
        if (op == JIM_EXPROP_ADD)
            doubleRes += doubleValue;
        else
            doubleRes *= doubleValue;
    }
    Jim_SetResult(interp, Jim_NewDoubleObj(interp, doubleRes));
    return JIM_OK;
}

// Helper for [-] and [/]
static __device__ int JimSubDivHelper(Jim_Interp *interp, int argc, Jim_Obj *const *argv, int op)
{
    jim_wide wideValue, res = 0;
    double doubleValue, doubleRes = 0;
    int i = 2;
    if (argc < 2) {
        Jim_WrongNumArgs(interp, 1, argv, "number ?number ... number?");
        return JIM_ERROR;
    }
    else if (argc == 2) {
        // The arity = 2 case is different: [- x] returns -x, while [/ x] returns 1/x.
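        // (e.g. [- 5] yields -5 and [/ 4] yields 0.25; note that the reciprocal is always
        // computed in floating point, even for an integer argument.)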
        if (Jim_GetWide(interp, argv[1], &wideValue) != JIM_OK) {
            if (Jim_GetDouble(interp, argv[1], &doubleValue) != JIM_OK)
                return JIM_ERROR;
            else {
                if (op == JIM_EXPROP_SUB)
                    doubleRes = -doubleValue;
                else
                    doubleRes = 1.0 / doubleValue;
                Jim_SetResult(interp, Jim_NewDoubleObj(interp, doubleRes));
                return JIM_OK;
            }
        }
        if (op == JIM_EXPROP_SUB) {
            res = -wideValue;
            Jim_SetResultInt(interp, res);
        }
        else {
            doubleRes = 1.0 / wideValue;
            Jim_SetResult(interp, Jim_NewDoubleObj(interp, doubleRes));
        }
        return JIM_OK;
    }
    else {
        if (Jim_GetWide(interp, argv[1], &res) != JIM_OK) {
            if (Jim_GetDouble(interp, argv[1], &doubleRes) != JIM_OK)
                return JIM_ERROR;
            else
                goto trydouble;
        }
    }
    for (i = 2; i < argc; i++) {
        if (Jim_GetWide(interp, argv[i], &wideValue) != JIM_OK) {
            doubleRes = (double)res;
            goto trydouble;
        }
        if (op == JIM_EXPROP_SUB)
            res -= wideValue;
        else
            res /= wideValue;
    }
    Jim_SetResultInt(interp, res);
    return JIM_OK;
trydouble:
    for (; i < argc; i++) {
        if (Jim_GetDouble(interp, argv[i], &doubleValue) != JIM_OK)
            return JIM_ERROR;
        if (op == JIM_EXPROP_SUB)
            doubleRes -= doubleValue;
        else
            doubleRes /= doubleValue;
    }
    Jim_SetResult(interp, Jim_NewDoubleObj(interp, doubleRes));
    return JIM_OK;
}

// [+]
static __device__ int Jim_AddCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    return JimAddMulHelper(interp, argc, argv, JIM_EXPROP_ADD);
}

// [*]
static __device__ int Jim_MulCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    return JimAddMulHelper(interp, argc, argv, JIM_EXPROP_MUL);
}

// [-]
static __device__ int Jim_SubCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    return JimSubDivHelper(interp, argc, argv, JIM_EXPROP_SUB);
}

// [/]
static __device__ int Jim_DivCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    return JimSubDivHelper(interp, argc, argv, JIM_EXPROP_DIV);
}

// [set]
static __device__ int Jim_SetCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    if (argc != 2 && argc != 3) {
        Jim_WrongNumArgs(interp, 1, argv, "varName ?newValue?");
        return JIM_ERROR;
    }
    if (argc == 2) {
        Jim_Obj *objPtr = Jim_GetVariable(interp, argv[1], JIM_ERRMSG);
        if (!objPtr)
            return JIM_ERROR;
        Jim_SetResult(interp, objPtr);
        return JIM_OK;
    }
    // argc == 3 case
    if (Jim_SetVariable(interp, argv[1], argv[2]) != JIM_OK)
        return JIM_ERROR;
    Jim_SetResult(interp, argv[2]);
    return JIM_OK;
}

// [unset]
// unset ?-nocomplain? ?--? ?varName ...?
static __device__ int Jim_UnsetCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    int i = 1;
    int complain = 1;
    while (i < argc) {
        if (Jim_CompareStringImmediate(interp, argv[i], "--")) {
            i++;
            break;
        }
        if (Jim_CompareStringImmediate(interp, argv[i], "-nocomplain")) {
            complain = 0;
            i++;
            continue;
        }
        break;
    }
    while (i < argc) {
        if (Jim_UnsetVariable(interp, argv[i], complain ? JIM_ERRMSG : JIM_NONE) != JIM_OK && complain)
            return JIM_ERROR;
        i++;
    }
    return JIM_OK;
}
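// (e.g. [unset -nocomplain x y] removes x and y if they exist and is silent otherwise;
// a lone "--" ends option processing, so even a variable literally named "-nocomplain"
// can still be unset.)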
// [while]
static __device__ int Jim_WhileCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    if (argc != 3) {
        Jim_WrongNumArgs(interp, 1, argv, "condition body");
        return JIM_ERROR;
    }
    // The general purpose implementation of while starts here
    while (1) {
        int boolean, retval;
        if ((retval = Jim_GetBoolFromExpr(interp, argv[1], &boolean)) != JIM_OK)
            return retval;
        if (!boolean)
            break;
        if ((retval = Jim_EvalObj(interp, argv[2])) != JIM_OK)
            switch (retval) {
            case JIM_BREAK: goto out;
            case JIM_CONTINUE: continue;
            default: return retval;
            }
    }
out:
    Jim_ResetResult(interp);
    return JIM_OK;
}

// [for]
static __device__ int Jim_ForCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    if (argc != 5) {
        Jim_WrongNumArgs(interp, 1, argv, "start test next body");
        return JIM_ERROR;
    }
    // Do the initialisation
    int retval;
    if ((retval = Jim_EvalObj(interp, argv[1])) != JIM_OK)
        return retval;
    // And do the first test now. Better for optimisation if we can do next/test at the bottom of the loop
    int boolean = 1;
    retval = Jim_GetBoolFromExpr(interp, argv[2], &boolean);
    Jim_Obj *varNamePtr = NULL;
    Jim_Obj *stopVarNamePtr = NULL;
    // Ready to do the body as follows:
    //   while (1) {
    //       body      // check retcode
    //       next      // check retcode
    //       test      // check retcode/test bool
    //   }
#ifdef JIM_OPTIMIZATION
    // Check if the for is in the form:
    //   for ... {$i < CONST} {incr i}
    //   for ... {$i < $j} {incr i}
    if (retval == JIM_OK && boolean) {
        jim_wide stop, currentVal;
        Jim_Obj *objPtr;
        // Do it only if there aren't shared arguments
        ExprByteCode *expr = JimGetExpression(interp, argv[2]);
        ScriptObj *incrScript = JimGetScript(interp, argv[3]);
        // Ensure proper lengths to start
        if (incrScript == NULL || incrScript->len != 3 || !expr || expr->len != 3)
            goto evalstart;
        // Ensure proper token types
        if (incrScript->token[1].type != JIM_TT_ESC || expr->token[0].type != JIM_TT_VAR || (expr->token[1].type != JIM_TT_EXPR_INT && expr->token[1].type != JIM_TT_VAR))
            goto evalstart;
        int cmpOffset;
        if (expr->token[2].type == JIM_EXPROP_LT)
            cmpOffset = 0;
        else if (expr->token[2].type == JIM_EXPROP_LTE)
            cmpOffset = 1;
        else
            goto evalstart;
        // Update command must be incr
        if (!Jim_CompareStringImmediate(interp, incrScript->token[1].objPtr, "incr"))
            goto evalstart;
        // incr, expression must be about the same variable
        if (!Jim_StringEqObj(incrScript->token[2].objPtr, expr->token[0].objPtr))
            goto evalstart;
        // Get the stop condition (must be a variable or integer)
        if (expr->token[1].type == JIM_TT_EXPR_INT) {
            if (Jim_GetWide(interp, expr->token[1].objPtr, &stop) == JIM_ERROR)
                goto evalstart;
        }
        else {
            stopVarNamePtr = expr->token[1].objPtr;
            Jim_IncrRefCount(stopVarNamePtr);
            stop = 0; // Keep the compiler happy
        }
        // Initialization
        varNamePtr = expr->token[0].objPtr;
        Jim_IncrRefCount(varNamePtr);
        objPtr = Jim_GetVariable(interp, varNamePtr, JIM_NONE);
        if (objPtr == NULL || Jim_GetWide(interp, objPtr, &currentVal) != JIM_OK)
            goto testcond;
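        // (Script-level loops such as
        //    for {set i 0} {$i < 10}  {incr i} {...}
        //    for {set i 0} {$i <= $n} {incr i} {...}
        // are run by the fast path below, bumping the integer directly instead of
        // re-evaluating the test expression and the [incr] script on every iteration.)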
        // --- OPTIMIZED FOR ---
        while (retval == JIM_OK) {
            // === Check condition ===
            // Note that currentVal is already set here.
            // Immediate or variable? Get the 'stop' value if the latter.
            if (stopVarNamePtr) {
                objPtr = Jim_GetVariable(interp, stopVarNamePtr, JIM_NONE);
                if (objPtr == NULL || Jim_GetWide(interp, objPtr, &stop) != JIM_OK)
                    goto testcond;
            }
            if (currentVal >= stop + cmpOffset)
                break;
            // Eval body
            retval = Jim_EvalObj(interp, argv[4]);
            if (retval == JIM_OK || retval == JIM_CONTINUE) {
                retval = JIM_OK;
                // Increment
                objPtr = Jim_GetVariable(interp, varNamePtr, JIM_ERRMSG);
                if (objPtr == NULL) {
                    retval = JIM_ERROR;
                    goto out;
                }
                if (!Jim_IsShared(objPtr) && objPtr->typePtr == &_intObjType) {
                    currentVal = ++JimWideValue(objPtr);
                    Jim_InvalidateStringRep(objPtr);
                }
                else if (Jim_GetWide(interp, objPtr, &currentVal) != JIM_OK || Jim_SetVariable(interp, varNamePtr, Jim_NewIntObj(interp, ++currentVal)) != JIM_OK)
                    goto evalnext;
            }
        }
        goto out;
    }
evalstart:
#endif
    while (boolean && (retval == JIM_OK || retval == JIM_CONTINUE)) {
        // Body
        retval = Jim_EvalObj(interp, argv[4]);
        if (retval == JIM_OK || retval == JIM_CONTINUE) {
evalnext:
            // Increment
            retval = Jim_EvalObj(interp, argv[3]);
            if (retval == JIM_OK || retval == JIM_CONTINUE) {
testcond:
                // Test
                retval = Jim_GetBoolFromExpr(interp, argv[2], &boolean);
            }
        }
    }
out:
    if (stopVarNamePtr)
        Jim_DecrRefCount(interp, stopVarNamePtr);
    if (varNamePtr)
        Jim_DecrRefCount(interp, varNamePtr);
    if (retval == JIM_CONTINUE || retval == JIM_BREAK || retval == JIM_OK) {
        Jim_ResetResult(interp);
        return JIM_OK;
    }
    return retval;
}

// [loop]
static __device__ int Jim_LoopCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    if (argc != 5 && argc != 6) {
        Jim_WrongNumArgs(interp, 1, argv, "var first limit ?incr? body");
        return JIM_ERROR;
    }
    jim_wide i;
    jim_wide limit;
    jim_wide incr = 1;
    if (Jim_GetWide(interp, argv[2], &i) != JIM_OK || Jim_GetWide(interp, argv[3], &limit) != JIM_OK || (argc == 6 && Jim_GetWide(interp, argv[4], &incr) != JIM_OK))
        return JIM_ERROR;
    Jim_Obj *bodyObjPtr = (argc == 5 ? argv[4] : argv[5]);
    int retval = Jim_SetVariable(interp, argv[1], argv[2]);
    while (((i < limit && incr > 0) || (i > limit && incr < 0)) && retval == JIM_OK) {
        retval = Jim_EvalObj(interp, bodyObjPtr);
        if (retval == JIM_OK || retval == JIM_CONTINUE) {
            Jim_Obj *objPtr = Jim_GetVariable(interp, argv[1], JIM_ERRMSG);
            retval = JIM_OK;
            // Increment
            i += incr;
            if (objPtr && !Jim_IsShared(objPtr) && objPtr->typePtr == &_intObjType) {
                if (argv[1]->typePtr != &_variableObjType)
                    if (Jim_SetVariable(interp, argv[1], objPtr) != JIM_OK)
                        return JIM_ERROR;
                JimWideValue(objPtr) = i;
                Jim_InvalidateStringRep(objPtr);
                // The following step is required in order to invalidate the string repr of "FOO" if the var name is of the form "FOO(IDX)"
                if (argv[1]->typePtr != &_variableObjType)
                    if (Jim_SetVariable(interp, argv[1], objPtr) != JIM_OK) {
                        retval = JIM_ERROR;
                        break;
                    }
            }
            else {
                objPtr = Jim_NewIntObj(interp, i);
                retval = Jim_SetVariable(interp, argv[1], objPtr);
                if (retval != JIM_OK)
                    Jim_FreeNewObj(interp, objPtr);
            }
        }
    }
    if (retval == JIM_OK || retval == JIM_CONTINUE || retval == JIM_BREAK) {
        Jim_ResetResult(interp);
        return JIM_OK;
    }
    return retval;
}

// List iterators make it easy to iterate over a list. At some point iterators will be expanded to support generators.
typedef struct {
    Jim_Obj *objPtr;
    int idx;
} Jim_ListIter;

// Initialise the iterator at the start of the list.
static __device__ void JimListIterInit(Jim_ListIter *iter, Jim_Obj *objPtr)
{
    iter->objPtr = objPtr;
    iter->idx = 0;
}

// Returns the next object from the list, or NULL on end-of-list.
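// Typical usage of the iterator pair (a sketch using only the init/next helpers defined here):
//   Jim_ListIter it;
//   Jim_Obj *el;
//   JimListIterInit(&it, listObj);
//   while ((el = JimListIterNext(interp, &it)) != NULL) {
//       // ... use el ...
//   }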
static __device__ Jim_Obj *JimListIterNext(Jim_Interp *interp, Jim_ListIter *iter)
{
    return (iter->idx >= Jim_ListLength(interp, iter->objPtr) ? NULL : iter->objPtr->internalRep.listValue.ele[iter->idx++]);
}

// Returns 1 if end-of-list has been reached.
static __device__ int JimListIterDone(Jim_Interp *interp, Jim_ListIter *iter)
{
    return iter->idx >= Jim_ListLength(interp, iter->objPtr);
}

// foreach + lmap implementation
static __device__ int JimForeachMapHelper(Jim_Interp *interp, int argc, Jim_Obj *const *argv, int doMap)
{
    int i;
    if (argc < 4 || argc % 2 != 0) {
        Jim_WrongNumArgs(interp, 1, argv, "varList list ?varList list ...? script");
        return JIM_ERROR;
    }
    Jim_Obj *script = argv[argc - 1]; // Last argument is a script
    int numargs = (argc - 1 - 1); // argc - 'foreach' - script
    Jim_ListIter twoiters[2]; // Avoid allocation for a single list
    Jim_ListIter *iters = (numargs == 2 ? twoiters : (Jim_ListIter *)Jim_Alloc(numargs * sizeof(*iters)));
    int result = JIM_OK;
    for (i = 0; i < numargs; i++) {
        JimListIterInit(&iters[i], argv[i + 1]);
        if (i % 2 == 0 && JimListIterDone(interp, &iters[i]))
            result = JIM_ERROR;
    }
    if (result != JIM_OK) {
        Jim_SetResultString(interp, "foreach varlist is empty", -1);
        return result;
    }
    Jim_Obj *resultObj = (doMap ? Jim_NewListObj(interp, NULL, 0) : interp->emptyObj);
    Jim_IncrRefCount(resultObj);
    while (1) {
        // Have we exhausted all lists?
        for (i = 0; i < numargs; i += 2)
            if (!JimListIterDone(interp, &iters[i + 1]))
                break;
        // All done
        if (i == numargs)
            break;
        // For each list
        for (i = 0; i < numargs; i += 2) {
            // foreach var
            JimListIterInit(&iters[i], argv[i + 1]);
            Jim_Obj *varName;
            while ((varName = JimListIterNext(interp, &iters[i])) != NULL) {
                Jim_Obj *valObj = JimListIterNext(interp, &iters[i + 1]);
                // Ran out, so store the empty string
                if (!valObj)
                    valObj = interp->emptyObj;
                // Avoid shimmering
                Jim_IncrRefCount(valObj);
                result = Jim_SetVariable(interp, varName, valObj);
                Jim_DecrRefCount(interp, valObj);
                if (result != JIM_OK)
                    goto err;
            }
        }
        switch (result = Jim_EvalObj(interp, script)) {
        case JIM_OK:
            if (doMap)
                Jim_ListAppendElement(interp, resultObj, interp->result);
            break;
        case JIM_CONTINUE:
            break;
        case JIM_BREAK:
            goto out;
        default:
            goto err;
        }
    }
out:
    result = JIM_OK;
    Jim_SetResult(interp, resultObj);
err:
    Jim_DecrRefCount(interp, resultObj);
    if (numargs > 2)
        Jim_Free(iters);
    return result;
}

// [foreach]
static __device__ int Jim_ForeachCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    return JimForeachMapHelper(interp, argc, argv, 0);
}

// [lmap]
static __device__ int Jim_LmapCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    return JimForeachMapHelper(interp, argc, argv, 1);
}
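// (Script-level examples of the helper above: [foreach {a b} {1 2 3 4} {...}] runs the body
// twice, with a=1 b=2 and then a=3 b=4; [lmap x {1 2 3} {expr {$x * 2}}] collects {2 4 6}.
// Missing values are filled with the empty string.)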
// [lassign]
static __device__ int Jim_LassignCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    if (argc < 2) {
        Jim_WrongNumArgs(interp, 1, argv, "varList list ?varName ...?");
        return JIM_ERROR;
    }
    Jim_ListIter iter;
    JimListIterInit(&iter, argv[1]);
    int result = JIM_ERROR;
    for (int i = 2; i < argc; i++) {
        Jim_Obj *valObj = JimListIterNext(interp, &iter);
        result = Jim_SetVariable(interp, argv[i], valObj ? valObj : interp->emptyObj);
        if (result != JIM_OK)
            return result;
    }
    Jim_Obj *resultObj = Jim_NewListObj(interp, NULL, 0);
    while (!JimListIterDone(interp, &iter))
        Jim_ListAppendElement(interp, resultObj, JimListIterNext(interp, &iter));
    Jim_SetResult(interp, resultObj);
    return JIM_OK;
}

// [if]
static __device__ int Jim_IfCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    int boolean, retval, current = 1, falsebody = 0;
    if (argc >= 3) {
        while (1) {
            // Not nearly enough arguments given!
            if (current >= argc)
                goto err;
            if ((retval = Jim_GetBoolFromExpr(interp, argv[current++], &boolean)) != JIM_OK)
                return retval;
            // Something is missing, isn't it?
            if (current >= argc)
                goto err;
            if (Jim_CompareStringImmediate(interp, argv[current], "then"))
                current++;
            // Tsk tsk, no then-clause?
            if (current >= argc)
                goto err;
            if (boolean)
                return Jim_EvalObj(interp, argv[current]);
            // Ok: no else-clause follows
            if (++current >= argc) {
                Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
                return JIM_OK;
            }
            falsebody = current++;
            if (Jim_CompareStringImmediate(interp, argv[falsebody], "else")) {
                // IIICKS - else-clause isn't the last cmd?
                if (current != argc - 1)
                    goto err;
                return Jim_EvalObj(interp, argv[current]);
            }
            // Ok: elseif follows, meaning all the stuff again (how boring...)
            else if (Jim_CompareStringImmediate(interp, argv[falsebody], "elseif"))
                continue;
            // OOPS - else-clause is not the last cmd?
            else if (falsebody != argc - 1)
                goto err;
            return Jim_EvalObj(interp, argv[falsebody]);
        }
        //return JIM_OK; // unreached
    }
err:
    Jim_WrongNumArgs(interp, 1, argv, "condition ?then? trueBody ?elseif ...? ?else? falseBody");
    return JIM_ERROR;
}
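// (e.g. [if {$x > 0} then {...} elseif {$x < 0} {...} else {...}] -- the "then" words are
// optional noise words, which is why the parser above simply skips them.)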
// Returns 1 if match, 0 if no match or -<error> on error (e.g. -JIM_ERROR, -JIM_BREAK)
__device__ int Jim_CommandMatchObj(Jim_Interp *interp, Jim_Obj *commandObj, Jim_Obj *patternObj, Jim_Obj *stringObj, int nocase)
{
    int argc = 0;
    Jim_Obj *parms[4];
    parms[argc++] = commandObj;
    if (nocase)
        parms[argc++] = Jim_NewStringObj(interp, "-nocase", -1);
    parms[argc++] = patternObj;
    parms[argc++] = stringObj;
    int rc = Jim_EvalObjVector(interp, argc, parms);
    long eq;
    if (rc != JIM_OK || Jim_GetLong(interp, Jim_GetResult(interp), &eq) != JIM_OK)
        eq = -rc;
    return eq;
}

enum { SWITCH_EXACT, SWITCH_GLOB, SWITCH_RE, SWITCH_CMD };

// [switch]
static __device__ int Jim_SwitchCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    int matchOpt = SWITCH_EXACT, opt = 1, patCount, i;
    Jim_Obj *command = 0, *const *caseList = 0, *strObj;
    Jim_Obj *script = 0;
    if (argc < 3) {
wrongnumargs:
        Jim_WrongNumArgs(interp, 1, argv, "?options? string pattern body ... ?default body? or {pattern body ?pattern body ...?}");
        return JIM_ERROR;
    }
    for (opt = 1; opt < argc; ++opt) {
        const char *option = Jim_String(argv[opt]);
        if (*option != '-')
            break;
        else if (!strncmp(option, "--", 2)) {
            ++opt;
            break;
        }
        else if (!strncmp(option, "-exact", 2))
            matchOpt = SWITCH_EXACT;
        else if (!strncmp(option, "-glob", 2))
            matchOpt = SWITCH_GLOB;
        else if (!strncmp(option, "-regexp", 2))
            matchOpt = SWITCH_RE;
        else if (!strncmp(option, "-command", 2)) {
            matchOpt = SWITCH_CMD;
            if ((argc - opt) < 2)
                goto wrongnumargs;
            command = argv[++opt];
        }
        else {
            Jim_SetResultFormatted(interp, "bad option \"%#s\": must be -exact, -glob, -regexp, -command procname or --", argv[opt]);
            return JIM_ERROR;
        }
        if ((argc - opt) < 2)
            goto wrongnumargs;
    }
    strObj = argv[opt++];
    patCount = argc - opt;
    if (patCount == 1) {
        Jim_Obj **vector;
        JimListGetElements(interp, argv[opt], &patCount, &vector);
        caseList = vector;
    }
    else
        caseList = &argv[opt];
    if (patCount == 0 || patCount % 2 != 0)
        goto wrongnumargs;
    for (i = 0; script == 0 && i < patCount; i += 2) {
        Jim_Obj *patObj = caseList[i];
        if (!Jim_CompareStringImmediate(interp, patObj, "default") || i < (patCount - 2))
            switch (matchOpt) {
            case SWITCH_EXACT:
                if (Jim_StringEqObj(strObj, patObj))
                    script = caseList[i + 1];
                break;
            case SWITCH_GLOB:
                if (Jim_StringMatchObj(interp, patObj, strObj, 0))
                    script = caseList[i + 1];
                break;
            case SWITCH_RE:
                command = Jim_NewStringObj(interp, "regexp", -1);
                // Fall thru intentionally
            case SWITCH_CMD: {
                int rc = Jim_CommandMatchObj(interp, command, patObj, strObj, 0);
                // After the execution of a command we need to reconvert the object into a list again. Only for the single-list style [switch].
                if (argc - opt == 1) {
                    Jim_Obj **vector;
                    JimListGetElements(interp, argv[opt], &patCount, &vector);
                    caseList = vector;
                }
                // command is here already decref'd
                if (rc < 0)
                    return -rc;
                if (rc)
                    script = caseList[i + 1];
                break;
            }
            }
        else
            script = caseList[i + 1];
    }
    for (; i < patCount && Jim_CompareStringImmediate(interp, script, "-"); i += 2)
        script = caseList[i + 1];
    if (script && Jim_CompareStringImmediate(interp, script, "-")) {
        Jim_SetResultFormatted(interp, "no body specified for pattern \"%#s\"", caseList[i - 2]);
        return JIM_ERROR;
    }
    Jim_ResetResult(interp);
    return (script ? Jim_EvalObj(interp, script) : JIM_OK);
}
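// (Script-level sketches: [switch -glob -- $x a* {...} default {...}] and the single-list form
// [switch $x {a* {...} default {...}}] are both accepted; with -command the match is delegated
// as [cmd ?-nocase? pattern string], which is also how -regexp is implemented above.)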
// [list]
static __device__ int Jim_ListCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    Jim_Obj *listObjPtr = Jim_NewListObj(interp, argv + 1, argc - 1);
    Jim_SetResult(interp, listObjPtr);
    return JIM_OK;
}

// [lindex]
static __device__ int Jim_LindexCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    if (argc < 2) {
        Jim_WrongNumArgs(interp, 1, argv, "list ?index ...?");
        return JIM_ERROR;
    }
    Jim_Obj *objPtr = argv[1];
    Jim_IncrRefCount(objPtr);
    for (int i = 2; i < argc; i++) {
        Jim_Obj *listObjPtr = objPtr;
        int idx;
        if (Jim_GetIndex(interp, argv[i], &idx) != JIM_OK) {
            Jim_DecrRefCount(interp, listObjPtr);
            return JIM_ERROR;
        }
        if (Jim_ListIndex(interp, listObjPtr, idx, &objPtr, JIM_NONE) != JIM_OK) {
            // Returns an empty object if the index is out of range
            Jim_DecrRefCount(interp, listObjPtr);
            Jim_ResetResult(interp);
            return JIM_OK;
        }
        Jim_IncrRefCount(objPtr);
        Jim_DecrRefCount(interp, listObjPtr);
    }
    Jim_SetResult(interp, objPtr);
    Jim_DecrRefCount(interp, objPtr);
    return JIM_OK;
}

// [llength]
static __device__ int Jim_LlengthCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    if (argc != 2) {
        Jim_WrongNumArgs(interp, 1, argv, "list");
        return JIM_ERROR;
    }
    Jim_SetResultInt(interp, Jim_ListLength(interp, argv[1]));
    return JIM_OK;
}

// [lsearch]
__constant__ static const char * const _lsearch_options[] = {
    "-bool", "-not", "-nocase", "-exact", "-glob", "-regexp", "-all", "-inline", "-command", NULL
};
static __device__ int Jim_LsearchCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    enum { OPT_BOOL, OPT_NOT, OPT_NOCASE, OPT_EXACT, OPT_GLOB, OPT_REGEXP, OPT_ALL, OPT_INLINE, OPT_COMMAND };
    int i;
    int opt_bool = 0;
    int opt_not = 0;
    int opt_nocase = 0;
    int opt_all = 0;
    int opt_inline = 0;
    int opt_match = OPT_EXACT;
    int rc = JIM_OK;
    Jim_Obj *listObjPtr = NULL;
    Jim_Obj *commandObj = NULL;
    if (argc < 3) {
wrongargs:
        Jim_WrongNumArgs(interp, 1, argv, "?-exact|-glob|-regexp|-command 'command'? ?-bool|-inline? ?-not? ?-nocase? ?-all? list value");
        return JIM_ERROR;
    }
list value"); return JIM_ERROR; } for (i = 1; i < argc - 2; i++) { int option; if (Jim_GetEnum(interp, argv[i], _lsearch_options, &option, NULL, JIM_ERRMSG) != JIM_OK) return JIM_ERROR; switch (option) { case OPT_BOOL: opt_bool = 1; opt_inline = 0; break; case OPT_NOT: opt_not = 1; break; case OPT_NOCASE: opt_nocase = 1; break; case OPT_INLINE: opt_inline = 1; opt_bool = 0; break; case OPT_ALL: opt_all = 1; break; case OPT_COMMAND: if (i >= argc - 2) goto wrongargs; commandObj = argv[++i]; // fallthru case OPT_EXACT: case OPT_GLOB: case OPT_REGEXP: opt_match = option; break; } } argv += i; if (opt_all) listObjPtr = Jim_NewListObj(interp, NULL, 0); if (opt_match == OPT_REGEXP) commandObj = Jim_NewStringObj(interp, "regexp", -1); if (commandObj) Jim_IncrRefCount(commandObj); int listlen = Jim_ListLength(interp, argv[0]); for (i = 0; i < listlen; i++) { int eq = 0; Jim_Obj *objPtr = Jim_ListGetIndex(interp, argv[0], i); switch (opt_match) { case OPT_EXACT: eq = Jim_StringCompareObj(interp, argv[1], objPtr, opt_nocase) == 0; break; case OPT_GLOB: eq = Jim_StringMatchObj(interp, argv[1], objPtr, opt_nocase); break; case OPT_REGEXP: case OPT_COMMAND: eq = Jim_CommandMatchObj(interp, commandObj, argv[1], objPtr, opt_nocase); if (eq < 0) { if (listObjPtr) Jim_FreeNewObj(interp, listObjPtr); rc = JIM_ERROR; goto done; } break; } // If we have a non-match with opt_bool, opt_not, !opt_all, can't exit early if (!eq && opt_bool && opt_not && !opt_all) continue; if ((!opt_bool && eq == !opt_not) || (opt_bool && (eq || opt_all))) { // Got a match (or non-match for opt_not), or (opt_bool && opt_all) Jim_Obj *resultObj; if (opt_bool) resultObj = Jim_NewIntObj(interp, eq ^ opt_not); else if (!opt_inline) resultObj = Jim_NewIntObj(interp, i); else resultObj = objPtr; if (opt_all) Jim_ListAppendElement(interp, listObjPtr, resultObj); else { Jim_SetResult(interp, resultObj); goto done; } } } if (opt_all) Jim_SetResult(interp, listObjPtr); else { // No match if (opt_bool) Jim_SetResultBool(interp, opt_not); else if (!opt_inline) Jim_SetResultInt(interp, -1); } done: if (commandObj) Jim_DecrRefCount(interp, commandObj); return rc; } // [lappend] static __device__ int Jim_LappendCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "varName ?value value ...?"); return JIM_ERROR; } Jim_Obj *listObjPtr = Jim_GetVariable(interp, argv[1], JIM_UNSHARED); if (!listObjPtr) { // Create the list if it does not exist listObjPtr = Jim_NewListObj(interp, NULL, 0); if (Jim_SetVariable(interp, argv[1], listObjPtr) != JIM_OK) { Jim_FreeNewObj(interp, listObjPtr); return JIM_ERROR; } } int shared = Jim_IsShared(listObjPtr); if (shared) listObjPtr = Jim_DuplicateObj(interp, listObjPtr); for (int i = 2; i < argc; i++) Jim_ListAppendElement(interp, listObjPtr, argv[i]); if (Jim_SetVariable(interp, argv[1], listObjPtr) != JIM_OK) { if (shared) Jim_FreeNewObj(interp, listObjPtr); return JIM_ERROR; } Jim_SetResult(interp, listObjPtr); return JIM_OK; } // [linsert] static __device__ int Jim_LinsertCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc < 3) { Jim_WrongNumArgs(interp, 1, argv, "list index ?element ...?"); return JIM_ERROR; } Jim_Obj *listPtr = argv[1]; if (Jim_IsShared(listPtr)) listPtr = Jim_DuplicateObj(interp, listPtr); int idx; if (Jim_GetIndex(interp, argv[2], &idx) != JIM_OK) goto err; int len = Jim_ListLength(interp, listPtr); if (idx >= len) idx = len; else if (idx < 0) idx = len + idx + 1; 
    Jim_ListInsertElements(interp, listPtr, idx, argc - 3, &argv[3]);
    Jim_SetResult(interp, listPtr);
    return JIM_OK;
err:
    if (listPtr != argv[1])
        Jim_FreeNewObj(interp, listPtr);
    return JIM_ERROR;
}

// [lreplace]
static __device__ int Jim_LreplaceCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    if (argc < 4) {
        Jim_WrongNumArgs(interp, 1, argv, "list first last ?element ...?");
        return JIM_ERROR;
    }
    int first, last;
    if (Jim_GetIndex(interp, argv[2], &first) != JIM_OK || Jim_GetIndex(interp, argv[3], &last) != JIM_OK)
        return JIM_ERROR;
    Jim_Obj *listObj = argv[1];
    int len = Jim_ListLength(interp, listObj);
    first = JimRelToAbsIndex(len, first);
    last = JimRelToAbsIndex(len, last);
    int rangeLen;
    JimRelToAbsRange(len, &first, &last, &rangeLen);
    // Now construct a new list which consists of: <elements before first> <supplied elements> <elements after last>
    // Check to see if trying to replace past the end of the list
    if (first < len) {
        // OK. Not past the end
    }
    // Special for empty list, adjust first to 0
    else if (len == 0)
        first = 0;
    else {
        Jim_SetResultString(interp, "list doesn't contain element ", -1);
        Jim_AppendObj(interp, Jim_GetResult(interp), argv[2]);
        return JIM_ERROR;
    }
    // Add the first set of elements
    Jim_Obj *newListObj = Jim_NewListObj(interp, listObj->internalRep.listValue.ele, first);
    // Add supplied elements
    ListInsertElements(newListObj, -1, argc - 4, argv + 4);
    // Add the remaining elements
    ListInsertElements(newListObj, -1, len - first - rangeLen, listObj->internalRep.listValue.ele + first + rangeLen);
    Jim_SetResult(interp, newListObj);
    return JIM_OK;
}

// [lset]
static __device__ int Jim_LsetCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    if (argc < 3) {
        Jim_WrongNumArgs(interp, 1, argv, "listVar ?index...? newVal");
        return JIM_ERROR;
    }
    // With no indexes, simply implements [set]
    else if (argc == 3) {
        if (Jim_SetVariable(interp, argv[1], argv[2], 0) != JIM_OK)
            return JIM_ERROR;
        Jim_SetResult(interp, argv[2]);
        return JIM_OK;
    }
    return Jim_ListSetIndex(interp, argv[1], argv + 2, argc - 3, argv[argc - 1]);
}
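// (Script-level examples: [lset l 1 X] replaces the second element of the list in variable l,
// [lset l 1 2 X] descends into the nested list at index 1 and replaces its element 2, and
// with no indices at all [lset l X] degenerates to [set l X], as handled above.)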
list"); return JIM_ERROR; } struct lsort_info info; info.type = lsort_info::JIM_LSORT_ASCII; info.order = 1; info.indexed = 0; info.unique = 0; info.command = NULL; info.interp = interp; for (int i = 1; i < (argc - 1); i++) { int option; if (Jim_GetEnum(interp, argv[i], _lsort_options, &option, NULL, JIM_ENUM_ABBREV | JIM_ERRMSG) != JIM_OK) return JIM_ERROR; switch (option) { case OPT_ASCII: info.type = lsort_info::JIM_LSORT_ASCII; break; case OPT_NOCASE: info.type = lsort_info::JIM_LSORT_NOCASE; break; case OPT_INTEGER: info.type = lsort_info::JIM_LSORT_INTEGER; break; case OPT_REAL: info.type = lsort_info::JIM_LSORT_REAL; break; case OPT_INCREASING: info.order = 1; break; case OPT_DECREASING: info.order = -1; break; case OPT_UNIQUE: info.unique = 1; break; case OPT_COMMAND: if (i >= (argc - 2)) { Jim_SetResultString(interp, "\"-command\" option must be followed by comparison command", -1); return JIM_ERROR; } info.type = lsort_info::JIM_LSORT_COMMAND; info.command = argv[i + 1]; i++; break; case OPT_INDEX: if (i >= (argc - 2)) { Jim_SetResultString(interp, "\"-index\" option must be followed by list index", -1); return JIM_ERROR; } if (Jim_GetIndex(interp, argv[i + 1], &info.index) != JIM_OK) return JIM_ERROR; info.indexed = 1; i++; break; } } Jim_Obj *resObj = Jim_DuplicateObj(interp, argv[argc - 1]); int retCode = ListSortElements(interp, resObj, &info); if (retCode == JIM_OK) Jim_SetResult(interp, resObj); else Jim_FreeNewObj(interp, resObj); return retCode; } // [append] static __device__ int Jim_AppendCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "varName ?value ...?"); return JIM_ERROR; } Jim_Obj *stringObjPtr; if (argc == 2) { stringObjPtr = Jim_GetVariable(interp, argv[1], JIM_ERRMSG); if (!stringObjPtr) return JIM_ERROR; } else { int freeobj = 0; stringObjPtr = Jim_GetVariable(interp, argv[1], JIM_UNSHARED); if (!stringObjPtr) { // Create the string if it doesn't exist stringObjPtr = Jim_NewEmptyStringObj(interp); freeobj = 1; } else if (Jim_IsShared(stringObjPtr)) { freeobj = 1; stringObjPtr = Jim_DuplicateObj(interp, stringObjPtr); } for (int i = 2; i < argc; i++) Jim_AppendObj(interp, stringObjPtr, argv[i]); if (Jim_SetVariable(interp, argv[1], stringObjPtr) != JIM_OK) { if (freeobj) Jim_FreeNewObj(interp, stringObjPtr); return JIM_ERROR; } } Jim_SetResult(interp, stringObjPtr); return JIM_OK; } // [debug] #if defined(JIM_DEBUG_COMMAND) && !defined(JIM_BOOTSTRAP) __constant__ static const char *const _debug_options[] = { "refcount", "objcount", "objects", "invstr", "scriptlen", "exprlen", "exprbc", "show", NULL }; #endif static __device__ int Jim_DebugCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { #if defined(JIM_DEBUG_COMMAND) && !defined(JIM_BOOTSTRAP) enum { OPT_REFCOUNT, OPT_OBJCOUNT, OPT_OBJECTS, OPT_INVSTR, OPT_SCRIPTLEN, OPT_EXPRLEN, OPT_EXPRBC, OPT_SHOW, }; if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "subcommand ?...?"); return JIM_ERROR; } int option; if (Jim_GetEnum(interp, argv[1], _debug_options, &option, "subcommand", JIM_ERRMSG) != JIM_OK) return JIM_ERROR; if (option == OPT_REFCOUNT) { if (argc != 3) { Jim_WrongNumArgs(interp, 2, argv, "object"); return JIM_ERROR; } Jim_SetResultInt(interp, argv[2]->refCount); return JIM_OK; } else if (option == OPT_OBJCOUNT) { int freeobj = 0, liveobj = 0; if (argc != 2) { Jim_WrongNumArgs(interp, 2, argv, ""); return JIM_ERROR; } // Count the number of free objects Jim_Obj *objPtr = 
        Jim_Obj *objPtr = interp->freeList;
        while (objPtr) {
            freeobj++;
            objPtr = objPtr->nextObjPtr;
        }
        // Count the number of live objects
        objPtr = interp->liveList;
        while (objPtr) {
            liveobj++;
            objPtr = objPtr->nextObjPtr;
        }
        // Set the result string and return
        char buf[256];
        sprintf(buf, "free %d used %d", freeobj, liveobj);
        Jim_SetResultString(interp, buf, -1);
        return JIM_OK;
    }
    else if (option == OPT_OBJECTS) {
        // Count the number of live objects
        Jim_Obj *objPtr = interp->liveList;
        Jim_Obj *listObjPtr = Jim_NewListObj(interp, NULL, 0);
        while (objPtr) {
            const char *type = (objPtr->typePtr ? objPtr->typePtr->name : "");
            Jim_Obj *subListObjPtr = Jim_NewListObj(interp, NULL, 0);
            char buf[128];
            sprintf(buf, "%p", objPtr);
            Jim_ListAppendElement(interp, subListObjPtr, Jim_NewStringObj(interp, buf, -1));
            Jim_ListAppendElement(interp, subListObjPtr, Jim_NewStringObj(interp, type, -1));
            Jim_ListAppendElement(interp, subListObjPtr, Jim_NewIntObj(interp, objPtr->refCount));
            Jim_ListAppendElement(interp, subListObjPtr, objPtr);
            Jim_ListAppendElement(interp, listObjPtr, subListObjPtr);
            objPtr = objPtr->nextObjPtr;
        }
        Jim_SetResult(interp, listObjPtr);
        return JIM_OK;
    }
    else if (option == OPT_INVSTR) {
        if (argc != 3) {
            Jim_WrongNumArgs(interp, 2, argv, "object");
            return JIM_ERROR;
        }
        Jim_Obj *objPtr = argv[2];
        if (objPtr->typePtr != NULL)
            Jim_InvalidateStringRep(objPtr);
        Jim_ResetResult(interp);
        return JIM_OK;
    }
    else if (option == OPT_SHOW) {
        if (argc != 3) {
            Jim_WrongNumArgs(interp, 2, argv, "object");
            return JIM_ERROR;
        }
        int len, charlen;
        const char *s = Jim_GetString(argv[2], &len);
#ifdef JIM_UTF8
        charlen = utf8_strlen(s, len);
#else
        charlen = len;
#endif
        printf("refcount: %d, type: %s\n", argv[2]->refCount, JimObjTypeName(argv[2]));
        printf("chars (%d): <<%s>>\n", charlen, s);
        printf("bytes (%d):", len);
        while (len--)
            printf(" %02x", (unsigned char)*s++);
        printf("\n");
        return JIM_OK;
    }
    else if (option == OPT_SCRIPTLEN) {
        if (argc != 3) {
            Jim_WrongNumArgs(interp, 2, argv, "script");
            return JIM_ERROR;
        }
        ScriptObj *script = JimGetScript(interp, argv[2]);
        if (script == NULL)
            return JIM_ERROR;
        Jim_SetResultInt(interp, script->len);
        return JIM_OK;
    }
    else if (option == OPT_EXPRLEN) {
        if (argc != 3) {
            Jim_WrongNumArgs(interp, 2, argv, "expression");
            return JIM_ERROR;
        }
        ExprByteCode *expr = JimGetExpression(interp, argv[2]);
        if (expr == NULL)
            return JIM_ERROR;
        Jim_SetResultInt(interp, expr->len);
        return JIM_OK;
    }
    else if (option == OPT_EXPRBC) {
        if (argc != 3) {
            Jim_WrongNumArgs(interp, 2, argv, "expression");
            return JIM_ERROR;
        }
        ExprByteCode *expr = JimGetExpression(interp, argv[2]);
        if (expr == NULL)
            return JIM_ERROR;
        Jim_Obj *objPtr = Jim_NewListObj(interp, NULL, 0);
        for (int i = 0; i < expr->len; i++) {
            const char *type;
            const Jim_ExprOperator *op;
            Jim_Obj *obj = expr->token[i].objPtr;
            switch (expr->token[i].type) {
            case JIM_TT_EXPR_INT: type = "int"; break;
            case JIM_TT_EXPR_DOUBLE: type = "double"; break;
            case JIM_TT_CMD: type = "command"; break;
            case JIM_TT_VAR: type = "variable"; break;
            case JIM_TT_DICTSUGAR: type = "dictsugar"; break;
            case JIM_TT_EXPRSUGAR: type = "exprsugar"; break;
            case JIM_TT_ESC: type = "subst"; break;
            case JIM_TT_STR: type = "string"; break;
            default:
                op = JimExprOperatorInfoByOpcode(expr->token[i].type);
                type = (op == NULL ? "private" : "operator");
                obj = Jim_NewStringObj(interp, op ? op->name : "", -1);
                break;
            }
            Jim_ListAppendElement(interp, objPtr, Jim_NewStringObj(interp, type, -1));
            Jim_ListAppendElement(interp, objPtr, obj);
        }
        Jim_SetResult(interp, objPtr);
        return JIM_OK;
    }
    else {
        Jim_SetResultString(interp, "bad option. Valid options are refcount, objcount, objects, invstr", -1);
        return JIM_ERROR;
    }
    // unreached
#endif // JIM_BOOTSTRAP
#ifndef JIM_DEBUG_COMMAND
    Jim_SetResultString(interp, "unsupported", -1);
    return JIM_ERROR;
#endif
}

// [eval]
static __device__ int Jim_EvalCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    if (argc < 2) {
        Jim_WrongNumArgs(interp, 1, argv, "arg ?arg ...?");
        return JIM_ERROR;
    }
    int rc = Jim_EvalObj(interp, argc == 2 ? argv[1] : Jim_ConcatObj(interp, argc - 1, argv + 1));
    // eval is "interesting", so add a stack frame here
    if (rc == JIM_ERROR)
        interp->addStackTrace++;
    return rc;
}

// [uplevel]
static __device__ int Jim_UplevelCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    if (argc >= 2) {
        // Save the old callframe pointer
        Jim_CallFrame *savedCallFrame = interp->framePtr;
        Jim_CallFrame *targetCallFrame;
        // Lookup the target frame pointer
        const char *str = Jim_String(argv[1]);
        if ((str[0] >= '0' && str[0] <= '9') || str[0] == '#') {
            targetCallFrame = Jim_GetCallFrameByLevel(interp, argv[1]);
            argc--;
            argv++;
        }
        else
            targetCallFrame = Jim_GetCallFrameByLevel(interp, NULL);
        if (targetCallFrame == NULL)
            return JIM_ERROR;
        if (argc < 2) {
            Jim_WrongNumArgs(interp, 1, argv - 1, "?level? command ?arg ...?");
            return JIM_ERROR;
        }
        // Eval the code in the target callframe
        interp->framePtr = targetCallFrame;
        int retcode = Jim_EvalObj(interp, argc == 2 ? argv[1] : Jim_ConcatObj(interp, argc - 1, argv + 1));
        interp->framePtr = savedCallFrame;
        return retcode;
    }
    else {
        Jim_WrongNumArgs(interp, 1, argv, "?level? command ?arg ...?");
        return JIM_ERROR;
    }
}
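// (e.g. [uplevel 1 {set x 5}] runs the script in the caller's frame and [uplevel #0 ...] in the
// global frame; a leading digit or '#' in the first argument is what distinguishes a level from
// the start of the command, as checked above.)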
command ?arg ...?"); return JIM_ERROR; } } // [expr] static __device__ int Jim_ExprCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { Jim_Obj *exprResultPtr; int retcode; if (argc == 2) retcode = Jim_EvalExpression(interp, argv[1], &exprResultPtr); else if (argc > 2) { Jim_Obj *objPtr = Jim_ConcatObj(interp, argc - 1, argv + 1); Jim_IncrRefCount(objPtr); retcode = Jim_EvalExpression(interp, objPtr, &exprResultPtr); Jim_DecrRefCount(interp, objPtr); } else { Jim_WrongNumArgs(interp, 1, argv, "expression ?...?"); return JIM_ERROR; } if (retcode != JIM_OK) return retcode; Jim_SetResult(interp, exprResultPtr); Jim_DecrRefCount(interp, exprResultPtr); return JIM_OK; } // [break] static __device__ int Jim_BreakCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 1) { Jim_WrongNumArgs(interp, 1, argv, ""); return JIM_ERROR; } return JIM_BREAK; } // [continue] static __device__ int Jim_ContinueCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 1) { Jim_WrongNumArgs(interp, 1, argv, ""); return JIM_ERROR; } return JIM_CONTINUE; } // [return] static __device__ int Jim_ReturnCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { int i; Jim_Obj *stackTraceObj = NULL; Jim_Obj *errorCodeObj = NULL; int returnCode = JIM_OK; long level = 1; for (i = 1; i < argc - 1; i += 2) { if (Jim_CompareStringImmediate(interp, argv[i], "-code")) { if (Jim_GetReturnCode(interp, argv[i + 1], &returnCode) == JIM_ERROR) return JIM_ERROR; } else if (Jim_CompareStringImmediate(interp, argv[i], "-errorinfo")) stackTraceObj = argv[i + 1]; else if (Jim_CompareStringImmediate(interp, argv[i], "-errorcode")) errorCodeObj = argv[i + 1]; else if (Jim_CompareStringImmediate(interp, argv[i], "-level")) { if (Jim_GetLong(interp, argv[i + 1], &level) != JIM_OK || level < 0) { Jim_SetResultFormatted(interp, "bad level \"%#s\"", argv[i + 1]); return JIM_ERROR; } } else break; } if (i != argc - 1 && i != argc) Jim_WrongNumArgs(interp, 1, argv, "?-code code? ?-errorinfo stacktrace? ?-level level? 
?result?"); // If a stack trace is supplied and code is error, set the stack trace if (stackTraceObj && returnCode == JIM_ERROR) JimSetStackTrace(interp, stackTraceObj); // If an error code list is supplied, set the global $errorCode if (errorCodeObj && returnCode == JIM_ERROR) Jim_SetVariableStr(interp, "errorCode", errorCodeObj, JIMGLOBAL_); interp->returnCode = returnCode; interp->returnLevel = level; if (i == argc - 1) Jim_SetResult(interp, argv[i]); return JIM_RETURN; } // [tailcall] static __device__ int Jim_TailcallCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (interp->framePtr->level == 0) { Jim_SetResultString(interp, "tailcall can only be called from a proc or lambda", -1); return JIM_ERROR; } else if (argc >= 2) { // Need to resolve the tailcall command in the current context Jim_CallFrame *cf = interp->framePtr->parent; Jim_Cmd *cmdPtr = Jim_GetCommand(interp, argv[1], JIM_ERRMSG); if (cmdPtr == NULL) return JIM_ERROR; JimPanic(cf->tailcallCmd != NULL, "Already have a tailcallCmd"); // And stash this pre-resolved command JimIncrCmdRefCount(cmdPtr); cf->tailcallCmd = cmdPtr; // And stash the command list JimPanic(cf->tailcallObj != NULL, "Already have a tailcallobj"); cf->tailcallObj = Jim_NewListObj(interp, argv + 1, argc - 1); Jim_IncrRefCount(cf->tailcallObj); // When the stack unwinds to the previous proc, the stashed command will be evaluated return JIM_EVAL; } return JIM_OK; } static __device__ int JimAliasCmd(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { Jim_Obj *prefixListObj = (Jim_Obj *)Jim_CmdPrivData(interp); // prefixListObj is a list to which the args need to be appended Jim_Obj *cmdList = Jim_DuplicateObj(interp, prefixListObj); Jim_ListInsertElements(interp, cmdList, Jim_ListLength(interp, cmdList), argc - 1, argv + 1); return JimEvalObjList(interp, cmdList); } static __device__ void JimAliasCmdDelete(ClientData privData, Jim_Interp *interp) { Jim_Obj *prefixListObj = (Jim_Obj *)privData; Jim_DecrRefCount(interp, prefixListObj); } static __device__ int Jim_AliasCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc < 3) { Jim_WrongNumArgs(interp, 1, argv, "newname command ?args ...?"); return JIM_ERROR; } Jim_Obj *prefixListObj = Jim_NewListObj(interp, argv + 2, argc - 2); Jim_IncrRefCount(prefixListObj); const char *newname = Jim_String(argv[1]); if (newname[0] == ':' && newname[1] == ':') { while (*++newname == ':') { } } Jim_SetResult(interp, argv[1]); return Jim_CreateCommand(interp, newname, JimAliasCmd, prefixListObj, JimAliasCmdDelete); } // [proc] static __device__ int Jim_ProcCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 4 && argc != 5) { Jim_WrongNumArgs(interp, 1, argv, "name arglist ?statics? body"); return JIM_ERROR; } if (JimValidName(interp, "procedure", argv[1]) != JIM_OK) return JIM_ERROR; Jim_Cmd *cmd = (argc == 4 ? 
JimCreateProcedureCmd(interp, argv[2], NULL, argv[3], NULL) : JimCreateProcedureCmd(interp, argv[2], argv[3], argv[4], NULL)); if (cmd) { // Add the new command Jim_Obj *qualifiedCmdNameObj; const char *cmdname = JimQualifyName(interp, Jim_String(argv[1]), &qualifiedCmdNameObj); JimCreateCommand(interp, cmdname, cmd); // Calculate and set the namespace for this proc JimUpdateProcNamespace(interp, cmd, cmdname); JimFreeQualifiedName(interp, qualifiedCmdNameObj); // Unlike Tcl, set the name of the proc as the result Jim_SetResult(interp, argv[1]); return JIM_OK; } return JIM_ERROR; } // [local] static __device__ int Jim_LocalCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "cmd ?args ...?"); return JIM_ERROR; } // Evaluate the arguments with 'local' in force interp->local++; int retcode = Jim_EvalObjVector(interp, argc - 1, argv + 1); interp->local--; // If OK, and the result is a proc, add it to the list of local procs if (retcode == 0) { Jim_Obj *cmdNameObj = Jim_GetResult(interp); if (Jim_GetCommand(interp, cmdNameObj, JIM_ERRMSG) == NULL) return JIM_ERROR; if (interp->framePtr->localCommands == NULL) { interp->framePtr->localCommands = (Jim_Stack *)Jim_Alloc(sizeof(*interp->framePtr->localCommands)); Jim_InitStack(interp->framePtr->localCommands); } Jim_IncrRefCount(cmdNameObj); Jim_StackPush(interp->framePtr->localCommands, cmdNameObj); } return retcode; } // [upcall] static __device__ int Jim_UpcallCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "cmd ?args ...?"); return JIM_ERROR; } else { Jim_Cmd *cmdPtr = Jim_GetCommand(interp, argv[1], JIM_ERRMSG); if (cmdPtr == NULL || !cmdPtr->isproc || !cmdPtr->prevCmd) { Jim_SetResultFormatted(interp, "no previous command: \"%#s\"", argv[1]); return JIM_ERROR; } // OK. 
Mark this command as being in an upcall cmdPtr->u.proc.upcall++; JimIncrCmdRefCount(cmdPtr); // Invoke the command as normal int retcode = Jim_EvalObjVector(interp, argc - 1, argv + 1); // No longer in an upcall cmdPtr->u.proc.upcall--; JimDecrCmdRefCount(interp, cmdPtr); return retcode; } } // [apply] static __device__ int Jim_ApplyCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "lambdaExpr ?arg ...?"); return JIM_ERROR; } else { int len = Jim_ListLength(interp, argv[1]); if (len != 2 && len != 3) { Jim_SetResultFormatted(interp, "can't interpret \"%#s\" as a lambda expression", argv[1]); return JIM_ERROR; } Jim_Obj *nsObj = NULL; if (len == 3) { #ifdef jim_ext_namespace nsObj = JimQualifyNameObj(interp, Jim_ListGetIndex(interp, argv[1], 2)); // Need to canonicalise the given namespace #else Jim_SetResultString(interp, "namespaces not enabled", -1); return JIM_ERROR; #endif } Jim_Obj *argListObjPtr = Jim_ListGetIndex(interp, argv[1], 0); Jim_Obj *bodyObjPtr = Jim_ListGetIndex(interp, argv[1], 1); Jim_Cmd *cmd = JimCreateProcedureCmd(interp, argListObjPtr, NULL, bodyObjPtr, nsObj); if (cmd) { // Create a new argv array with a dummy argv[0], for error messages Jim_Obj **nargv = (Jim_Obj **)Jim_Alloc((argc - 2 + 1) * sizeof(*nargv)); nargv[0] = Jim_NewStringObj(interp, "apply lambdaExpr", -1); Jim_IncrRefCount(nargv[0]); memcpy(&nargv[1], argv + 2, (argc - 2) * sizeof(*nargv)); int ret = JimCallProcedure(interp, cmd, argc - 2 + 1, nargv); Jim_DecrRefCount(interp, nargv[0]); Jim_Free(nargv); JimDecrCmdRefCount(interp, cmd); return ret; } return JIM_ERROR; } } // [concat] static __device__ int Jim_ConcatCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { Jim_SetResult(interp, Jim_ConcatObj(interp, argc - 1, argv + 1)); return JIM_OK; } // [upvar] static __device__ int Jim_UpvarCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { Jim_CallFrame *targetCallFrame; // Lookup the target frame pointer if (argc > 3 && (argc % 2 == 0)) { targetCallFrame = Jim_GetCallFrameByLevel(interp, argv[1]); argc--; argv++; } else targetCallFrame = Jim_GetCallFrameByLevel(interp, NULL); if (targetCallFrame == NULL) return JIM_ERROR; // Check for arity if (argc < 3) { Jim_WrongNumArgs(interp, 1, argv, "?level? otherVar localVar ?otherVar localVar ...?"); return JIM_ERROR; } // Now... for every other/local couple: for (int i = 1; i < argc; i += 2) if (Jim_SetVariableLink(interp, argv[i + 1], argv[i], targetCallFrame) != JIM_OK) return JIM_ERROR; return JIM_OK; } // [global] static __device__ int JimGLOBAL_CoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "varName ?varName ...?"); return JIM_ERROR; } // Link every var to the toplevel having the same name if (interp->framePtr->level == 0) return JIM_OK; // global at toplevel... for (int i = 1; i < argc; i++) { // global ::blah does nothing const char *name = Jim_String(argv[i]); if (name[0] != ':' || name[1] != ':') if (Jim_SetVariableLink(interp, argv[i], argv[i], interp->topFramePtr) != JIM_OK) return JIM_ERROR; } return JIM_OK; } // does the [string map] operation. On error NULL is returned, otherwise a new string object with the result, having refcount = 0, is returned. 
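// Illustrative sketch (assumed, mirroring Tcl's [string map]): the scan is left to right and the
// first matching key at each position wins, e.g.
//   string map {ab X b Y} "abb"   ;# => "XY"  ("ab" matches at position 0, then "b")
// A minimal C-level call, assuming mapListObjPtr and strObjPtr are valid Jim_Obj pointers:
//   Jim_Obj *res = JimStringMap(interp, mapListObjPtr, strObjPtr, 0 /* case sensitive */);
//   if (res == NULL) { /* map list had an odd number of elements; error message already set */ }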
static __device__ Jim_Obj *JimStringMap(Jim_Interp *interp, Jim_Obj *mapListObjPtr, Jim_Obj *objPtr, int nocase) { int numMaps = Jim_ListLength(interp, mapListObjPtr); if (numMaps % 2) { Jim_SetResultString(interp, "list must contain an even number of elements", -1); return NULL; } const char *str = Jim_String(objPtr); int strLen = Jim_Utf8Length(interp, objPtr); // Map it const char *noMatchStart = NULL; Jim_Obj *resultObjPtr = Jim_NewStringObj(interp, "", 0); while (strLen) { int i; for (i = 0; i < numMaps; i += 2) { Jim_Obj *objPtr = Jim_ListGetIndex(interp, mapListObjPtr, i); const char *k = Jim_String(objPtr); int kl = Jim_Utf8Length(interp, objPtr); if (strLen >= kl && kl) { int rc = JimStringCompareLen(str, k, kl, nocase); if (rc == 0) { if (noMatchStart) { Jim_AppendString(interp, resultObjPtr, noMatchStart, (int)(str - noMatchStart)); noMatchStart = NULL; } Jim_AppendObj(interp, resultObjPtr, Jim_ListGetIndex(interp, mapListObjPtr, i + 1)); str += utf8_index(str, kl); strLen -= kl; break; } } } // no match if (i == numMaps) { if (noMatchStart == NULL) noMatchStart = str; int c; UNUSED_SYMBOL(c); str += utf8_tounicode(str, &c); strLen--; } } if (noMatchStart) Jim_AppendString(interp, resultObjPtr, noMatchStart, (int)(str - noMatchStart)); return resultObjPtr; } // [string] __constant__ static const char *const _string_options[] = { "bytelength", "length", "compare", "match", "equal", "is", "byterange", "range", "replace", "map", "repeat", "reverse", "index", "first", "last", "cat", "trim", "trimleft", "trimright", "tolower", "toupper", "totitle", NULL }; __constant__ static const char *const _string_nocase_options[] = { "-nocase", NULL }; __constant__ static const char *const _string_nocase_length_options[] = { "-nocase", "-length", NULL }; static __device__ int Jim_StringCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { enum { OPT_BYTELENGTH, OPT_LENGTH, OPT_COMPARE, OPT_MATCH, OPT_EQUAL, OPT_IS, OPT_BYTERANGE, OPT_RANGE, OPT_REPLACE, OPT_MAP, OPT_REPEAT, OPT_REVERSE, OPT_INDEX, OPT_FIRST, OPT_LAST, OPT_CAT, OPT_TRIM, OPT_TRIMLEFT, OPT_TRIMRIGHT, OPT_TOLOWER, OPT_TOUPPER, OPT_TOTITLE }; if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "option ?arguments ...?"); return JIM_ERROR; } int option; if (Jim_GetEnum(interp, argv[1], _string_options, &option, NULL, JIM_ERRMSG | JIM_ENUM_ABBREV) != JIM_OK) return JIM_ERROR; int len; int opt_case = 1; switch (option) { case OPT_LENGTH: case OPT_BYTELENGTH: if (argc != 3) { Jim_WrongNumArgs(interp, 2, argv, "string"); return JIM_ERROR; } len = (option == OPT_LENGTH ? Jim_Utf8Length(interp, argv[2]) : Jim_Length(argv[2])); Jim_SetResultInt(interp, len); return JIM_OK; case OPT_CAT: { Jim_Obj *objPtr; // optimise the one-arg case if (argc == 3) objPtr = argv[2]; else { objPtr = Jim_NewStringObj(interp, "", 0); for (int i = 2; i < argc; i++) Jim_AppendObj(interp, objPtr, argv[i]); } Jim_SetResult(interp, objPtr); return JIM_OK; } case OPT_COMPARE: case OPT_EQUAL: { // n is the number of remaining option args long opt_length = -1; int n = argc - 4; int i = 2; while (n > 0) { int subopt; if (Jim_GetEnum(interp, argv[i++], _string_nocase_length_options, &subopt, NULL, JIM_ENUM_ABBREV) != JIM_OK) { badcompareargs: Jim_WrongNumArgs(interp, 2, argv, "?-nocase? ?-length int? 
string1 string2"); return JIM_ERROR; } if (subopt == 0) { // -nocase opt_case = 0; n--; } else { // -length if (n < 2) goto badcompareargs; if (Jim_GetLong(interp, argv[i++], &opt_length) != JIM_OK) return JIM_ERROR; n -= 2; } } if (n) goto badcompareargs; argv += argc - 2; // Fast version - [string equal], case sensitive, no length if (opt_length < 0 && option != OPT_COMPARE && opt_case) Jim_SetResultBool(interp, Jim_StringEqObj(argv[0], argv[1])); else { if (opt_length >= 0) n = JimStringCompareLen(Jim_String(argv[0]), Jim_String(argv[1]), opt_length, !opt_case); else n = Jim_StringCompareObj(interp, argv[0], argv[1], !opt_case); Jim_SetResultInt(interp, option == OPT_COMPARE ? n : n == 0); } return JIM_OK; } case OPT_MATCH: if (argc != 4 && (argc != 5 || Jim_GetEnum(interp, argv[2], _string_nocase_options, &opt_case, NULL, JIM_ENUM_ABBREV) != JIM_OK)) { Jim_WrongNumArgs(interp, 2, argv, "?-nocase? pattern string"); return JIM_ERROR; } if (opt_case == 0) argv++; Jim_SetResultBool(interp, Jim_StringMatchObj(interp, argv[2], argv[3], !opt_case)); return JIM_OK; case OPT_MAP: { if (argc != 4 && (argc != 5 || Jim_GetEnum(interp, argv[2], _string_nocase_options, &opt_case, NULL, JIM_ENUM_ABBREV) != JIM_OK)) { Jim_WrongNumArgs(interp, 2, argv, "?-nocase? mapList string"); return JIM_ERROR; } if (opt_case == 0) argv++; Jim_Obj *objPtr = JimStringMap(interp, argv[2], argv[3], !opt_case); if (objPtr == NULL) return JIM_ERROR; Jim_SetResult(interp, objPtr); return JIM_OK; } case OPT_RANGE: case OPT_BYTERANGE:{ if (argc != 5) { Jim_WrongNumArgs(interp, 2, argv, "string first last"); return JIM_ERROR; } Jim_Obj *objPtr = (option == OPT_RANGE ? Jim_StringRangeObj(interp, argv[2], argv[3], argv[4]) : Jim_StringByteRangeObj(interp, argv[2], argv[3], argv[4])); if (objPtr == NULL) return JIM_ERROR; Jim_SetResult(interp, objPtr); return JIM_OK; } case OPT_REPLACE:{ if (argc != 5 && argc != 6) { Jim_WrongNumArgs(interp, 2, argv, "string first last ?string?"); return JIM_ERROR; } Jim_Obj *objPtr = JimStringReplaceObj(interp, argv[2], argv[3], argv[4], argc == 6 ? 
argv[5] : NULL); if (objPtr == NULL) return JIM_ERROR; Jim_SetResult(interp, objPtr); return JIM_OK; } case OPT_REPEAT:{ if (argc != 4) { Jim_WrongNumArgs(interp, 2, argv, "string count"); return JIM_ERROR; } jim_wide count; if (Jim_GetWide(interp, argv[3], &count) != JIM_OK) return JIM_ERROR; Jim_Obj *objPtr = Jim_NewStringObj(interp, "", 0); if (count > 0) while (count--) Jim_AppendObj(interp, objPtr, argv[2]); Jim_SetResult(interp, objPtr); return JIM_OK; } case OPT_REVERSE:{ if (argc != 3) { Jim_WrongNumArgs(interp, 2, argv, "string"); return JIM_ERROR; } const char *str = Jim_GetString(argv[2], &len); char *buf = (char *)Jim_Alloc(len + 1); char *p = buf + len; *p = 0; for (int i = 0; i < len; ) { int c; UNUSED_SYMBOL(c); int l = utf8_tounicode(str, &c); memcpy(p - l, str, l); p -= l; i += l; str += l; } Jim_SetResult(interp, Jim_NewStringObjNoAlloc(interp, buf, len)); return JIM_OK; } case OPT_INDEX:{ if (argc != 4) { Jim_WrongNumArgs(interp, 2, argv, "string index"); return JIM_ERROR; } int idx; if (Jim_GetIndex(interp, argv[3], &idx) != JIM_OK) return JIM_ERROR; const char *str = Jim_String(argv[2]); len = Jim_Utf8Length(interp, argv[2]); if (idx != INT_MIN && idx != INT_MAX) idx = JimRelToAbsIndex(len, idx); if (idx < 0 || idx >= len || str == NULL) Jim_SetResultString(interp, "", 0); // ASCII optimisation else if (len == Jim_Length(argv[2])) Jim_SetResultString(interp, str + idx, 1); else { int i = utf8_index(str, idx); int c; UNUSED_SYMBOL(c); Jim_SetResultString(interp, str + i, utf8_tounicode(str + i, &c)); } return JIM_OK; } case OPT_FIRST: case OPT_LAST:{ if (argc != 4 && argc != 5) { Jim_WrongNumArgs(interp, 2, argv, "subString string ?index?"); return JIM_ERROR; } const char *s1 = Jim_String(argv[2]); const char *s2 = Jim_String(argv[3]); int l1 = Jim_Utf8Length(interp, argv[2]); int l2 = Jim_Utf8Length(interp, argv[3]); int idx = 0; if (argc == 5) { if (Jim_GetIndex(interp, argv[4], &idx) != JIM_OK) return JIM_ERROR; idx = JimRelToAbsIndex(l2, idx); } else if (option == OPT_LAST) idx = l2; if (option == OPT_FIRST) Jim_SetResultInt(interp, JimStringFirst(s1, l1, s2, l2, idx)); else #ifdef JIM_UTF8 Jim_SetResultInt(interp, JimStringLastUtf8(s1, l1, s2, idx)); #else Jim_SetResultInt(interp, JimStringLast(s1, l1, s2, idx)); #endif return JIM_OK; } case OPT_TRIM: case OPT_TRIMLEFT: case OPT_TRIMRIGHT:{ if (argc != 3 && argc != 4) { Jim_WrongNumArgs(interp, 2, argv, "string ?trimchars?"); return JIM_ERROR; } Jim_Obj *trimchars = (argc == 4 ? argv[3] : NULL); if (option == OPT_TRIM) Jim_SetResult(interp, JimStringTrim(interp, argv[2], trimchars)); else if (option == OPT_TRIMLEFT) Jim_SetResult(interp, JimStringTrimLeft(interp, argv[2], trimchars)); else if (option == OPT_TRIMRIGHT) Jim_SetResult(interp, JimStringTrimRight(interp, argv[2], trimchars)); return JIM_OK; } case OPT_TOLOWER: case OPT_TOUPPER: case OPT_TOTITLE: if (argc != 3) { Jim_WrongNumArgs(interp, 2, argv, "string"); return JIM_ERROR; } if (option == OPT_TOLOWER) Jim_SetResult(interp, JimStringToLower(interp, argv[2])); else if (option == OPT_TOUPPER) Jim_SetResult(interp, JimStringToUpper(interp, argv[2])); else Jim_SetResult(interp, JimStringToTitle(interp, argv[2])); return JIM_OK; case OPT_IS: if (argc == 4 || (argc == 5 && Jim_CompareStringImmediate(interp, argv[3], "-strict"))) return JimStringIs(interp, argv[argc - 1], argv[2], argc == 5); Jim_WrongNumArgs(interp, 2, argv, "class ?-strict? 
str"); return JIM_ERROR; } return JIM_OK; } // [time] static __device__ int Jim_TimeCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "script ?count?"); return JIM_ERROR; } long count = 1; if (argc == 3 && Jim_GetLong(interp, argv[2], &count) != JIM_OK) return JIM_ERROR; if (count < 0) return JIM_OK; long i = count; jim_wide start = JimClock(); while (i-- > 0) { int retval = Jim_EvalObj(interp, argv[1]); if (retval != JIM_OK) return retval; } jim_wide elapsed = JimClock() - start; char buf[60]; const char *fmt = "%" JIM_WIDE_MODIFIER " microseconds per iteration"; sprintf(buf, fmt, count == 0 ? 0 : elapsed / count); Jim_SetResultString(interp, buf, -1); return JIM_OK; } // [exit] static __device__ int Jim_ExitCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc > 2) { Jim_WrongNumArgs(interp, 1, argv, "?exitCode?"); return JIM_ERROR; } long exitCode = 0; if (argc == 2 && Jim_GetLong(interp, argv[1], &exitCode) != JIM_OK) return JIM_ERROR; interp->exitCode = exitCode; return JIM_EXIT; } // [catch] static __device__ int Jim_CatchCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { // Which return codes are ignored (passed through)? By default, only exit, eval and signal jim_wide ignore_mask = (1 << JIM_EXIT) | (1 << JIM_EVAL) | (1 << JIM_SIGNAL); const int max_ignore_code = sizeof(ignore_mask) * 8; // Reset the error code before catch. Note that this is not strictly correct. Jim_SetVariableStr(interp, "errorCode", Jim_NewStringObj(interp, "NONE", -1), JIMGLOBAL_); int i; for (i = 1; i < argc - 1; i++) { const char *arg = Jim_String(argv[i]); jim_wide option; int ignore; // It's a pity we can't use Jim_GetEnum here :-( if (!strcmp(arg, "--")) { i++; break; } if (*arg != '-') break; if (!strncmp(arg, "-no", 3)) { arg += 3; ignore = 1; } else { arg++; ignore = 0; } if (Jim_StringToWide(arg, &option, 10) != JIM_OK) { option = -1; } if (option < 0) { option = Jim_FindByName(arg, jimReturnCodes, jimReturnCodesSize); } if (option < 0) { goto wrongargs; } if (ignore) ignore_mask |= (1 << option); else ignore_mask &= ~(1 << option); } argc -= i; if (argc < 1 || argc > 3) { wrongargs: Jim_WrongNumArgs(interp, 1, argv, "?-?no?code ... --? script ?resultVarName? ?optionVarName?"); return JIM_ERROR; } argv += i; int sig = 0; if ((ignore_mask & (1 << JIM_SIGNAL)) == 0) sig++; interp->signal_level += sig; // If a signal is set, don't even try to execute the body int exitCode = 0; if (Jim_CheckSignal(interp)) exitCode = JIM_SIGNAL; else { exitCode = Jim_EvalObj(interp, argv[0]); // Don't want any caught error included in a later stack trace interp->errorFlag = 0; } interp->signal_level -= sig; // Catch or pass through? 
Only the first 32/64 codes can be passed through if (exitCode >= 0 && exitCode < max_ignore_code && (((unsigned jim_wide)1 << exitCode) & ignore_mask)) return exitCode; // Not caught, pass it up if (sig && exitCode == JIM_SIGNAL) { // Catch the signal at this level if (interp->signal_set_result) interp->signal_set_result(interp, interp->sigmask); else Jim_SetResultInt(interp, interp->sigmask); interp->sigmask = 0; } if (argc >= 2) { if (Jim_SetVariable(interp, argv[1], Jim_GetResult(interp)) != JIM_OK) return JIM_ERROR; if (argc == 3) { Jim_Obj *optListObj = Jim_NewListObj(interp, NULL, 0); Jim_ListAppendElement(interp, optListObj, Jim_NewStringObj(interp, "-code", -1)); Jim_ListAppendElement(interp, optListObj, Jim_NewIntObj(interp, exitCode == JIM_RETURN ? interp->returnCode : exitCode)); Jim_ListAppendElement(interp, optListObj, Jim_NewStringObj(interp, "-level", -1)); Jim_ListAppendElement(interp, optListObj, Jim_NewIntObj(interp, interp->returnLevel)); if (exitCode == JIM_ERROR) { Jim_ListAppendElement(interp, optListObj, Jim_NewStringObj(interp, "-errorinfo", -1)); Jim_ListAppendElement(interp, optListObj, interp->stackTrace); Jim_Obj *errorCode = Jim_GetVariableStr(interp, "errorCode", JIMGLOBAL_); if (errorCode) { Jim_ListAppendElement(interp, optListObj, Jim_NewStringObj(interp, "-errorcode", -1)); Jim_ListAppendElement(interp, optListObj, errorCode); } } if (Jim_SetVariable(interp, argv[2], optListObj) != JIM_OK) return JIM_ERROR; } } Jim_SetResultInt(interp, exitCode); return JIM_OK; } #ifdef JIM_REFERENCES // [ref] static __device__ int Jim_RefCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 3 && argc != 4) { Jim_WrongNumArgs(interp, 1, argv, "string tag ?finalizer?"); return JIM_ERROR; } if (argc == 3) Jim_SetResult(interp, Jim_NewReference(interp, argv[1], argv[2], NULL)); else Jim_SetResult(interp, Jim_NewReference(interp, argv[1], argv[2], argv[3])); return JIM_OK; } // [getref] static __device__ int Jim_GetrefCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 2) { Jim_WrongNumArgs(interp, 1, argv, "reference"); return JIM_ERROR; } Jim_Reference *refPtr; if ((refPtr = Jim_GetReference(interp, argv[1])) == NULL) return JIM_ERROR; Jim_SetResult(interp, refPtr->objPtr); return JIM_OK; } // [setref] static __device__ int Jim_SetrefCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 3) { Jim_WrongNumArgs(interp, 1, argv, "reference newValue"); return JIM_ERROR; } Jim_Reference *refPtr; if ((refPtr = Jim_GetReference(interp, argv[1])) == NULL) return JIM_ERROR; Jim_IncrRefCount(argv[2]); Jim_DecrRefCount(interp, refPtr->objPtr); refPtr->objPtr = argv[2]; Jim_SetResult(interp, argv[2]); return JIM_OK; } // [collect] static __device__ int Jim_CollectCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 1) { Jim_WrongNumArgs(interp, 1, argv, ""); return JIM_ERROR; } Jim_SetResultInt(interp, Jim_Collect(interp)); // Free all the freed objects while (interp->freeList) { Jim_Obj *nextObjPtr = interp->freeList->nextObjPtr; Jim_Free(interp->freeList); interp->freeList = nextObjPtr; } return JIM_OK; } // [finalize] reference ?newValue? 
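// Illustrative script-level usage of the reference commands above (assumed semantics, matching
// the argument checks in the handlers):
//   set r [ref "payload" mytag myCleanup]  ;# create a reference with finalizer myCleanup
//   getref $r                              ;# => payload
//   setref $r "other"                      ;# replace the referenced value
//   finalize $r                            ;# => myCleanup (one extra arg queries the finalizer)
//   finalize $r newCleanup                 ;# install newCleanup as the finalizer
//   collect                                ;# run the GC; result is the number collected (assumed)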
static __device__ int Jim_FinalizeCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 2 && argc != 3) { Jim_WrongNumArgs(interp, 1, argv, "reference ?finalizerProc?"); return JIM_ERROR; } if (argc == 2) { Jim_Obj *cmdNamePtr; if (Jim_GetFinalizer(interp, argv[1], &cmdNamePtr) != JIM_OK) return JIM_ERROR; if (cmdNamePtr != NULL) // otherwise the null string is returned Jim_SetResult(interp, cmdNamePtr); } else { if (Jim_SetFinalizer(interp, argv[1], argv[2]) != JIM_OK) return JIM_ERROR; Jim_SetResult(interp, argv[2]); } return JIM_OK; } // [info references] static __device__ int JimInfoReferences(Jim_Interp *interp, int argc, Jim_Obj *const *argv) { Jim_Obj *listObjPtr = Jim_NewListObj(interp, NULL, 0); Jim_HashTableIterator htiter; Jim_HashEntry *he; JimInitHashTableIterator(&interp->references, &htiter); while ((he = Jim_NextHashEntry(&htiter)) != NULL) { char buf[JIM_REFERENCE_SPACE + 1]; Jim_Reference *refPtr = (Jim_Reference *)Jim_GetHashEntryVal(he); const unsigned long *refId = (const unsigned long *)he->key; JimFormatReference(buf, refPtr, *refId); Jim_ListAppendElement(interp, listObjPtr, Jim_NewStringObj(interp, buf, -1)); } Jim_SetResult(interp, listObjPtr); return JIM_OK; } #endif // [rename] static __device__ int Jim_RenameCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 3) { Jim_WrongNumArgs(interp, 1, argv, "oldName newName"); return JIM_ERROR; } if (JimValidName(interp, "new procedure", argv[2])) return JIM_ERROR; return Jim_RenameCommand(interp, Jim_String(argv[1]), Jim_String(argv[2])); } #define JIM_DICTMATCH_VALUES 0x0001 typedef void JimDictMatchCallbackType(Jim_Interp *interp, Jim_Obj *listObjPtr, Jim_HashEntry *he, int type); static __device__ void JimDictMatchKeys(Jim_Interp *interp, Jim_Obj *listObjPtr, Jim_HashEntry *he, int type) { Jim_ListAppendElement(interp, listObjPtr, (Jim_Obj *)he->key); if (type & JIM_DICTMATCH_VALUES) Jim_ListAppendElement(interp, listObjPtr, (Jim_Obj *)Jim_GetHashEntryVal(he)); } // Like JimHashtablePatternMatch, but for dictionaries. static __device__ Jim_Obj *JimDictPatternMatch(Jim_Interp *interp, Jim_HashTable *ht, Jim_Obj *patternObjPtr, JimDictMatchCallbackType *callback, int type) { Jim_Obj *listObjPtr = Jim_NewListObj(interp, NULL, 0); // Check for the non-pattern case. We can do this much more efficiently. 
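// Illustrative behaviour (assumed): a NULL pattern collects every entry, otherwise keys are
// filtered with glob matching (result order follows the hash table), e.g.
//   dict keys {apple 1 banana 2 avocado 3} a*   ;# => apple avocado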
Jim_HashEntry *he; Jim_HashTableIterator htiter; JimInitHashTableIterator(ht, &htiter); while ((he = Jim_NextHashEntry(&htiter)) != NULL) if (patternObjPtr == NULL || JimGlobMatch(Jim_String(patternObjPtr), Jim_String((Jim_Obj *)he->key), 0)) callback(interp, listObjPtr, he, type); return listObjPtr; } __device__ int Jim_DictKeys(Jim_Interp *interp, Jim_Obj *objPtr, Jim_Obj *patternObjPtr) { if (SetDictFromAny(interp, objPtr) != JIM_OK) return JIM_ERROR; Jim_SetResult(interp, JimDictPatternMatch(interp, (Jim_HashTable *)objPtr->internalRep.ptr, patternObjPtr, JimDictMatchKeys, 0)); return JIM_OK; } __device__ int Jim_DictValues(Jim_Interp *interp, Jim_Obj *objPtr, Jim_Obj *patternObjPtr) { if (SetDictFromAny(interp, objPtr) != JIM_OK) return JIM_ERROR; Jim_SetResult(interp, JimDictPatternMatch(interp, (Jim_HashTable *)objPtr->internalRep.ptr, patternObjPtr, JimDictMatchKeys, JIM_DICTMATCH_VALUES)); return JIM_OK; } __device__ int Jim_DictSize(Jim_Interp *interp, Jim_Obj *objPtr) { if (SetDictFromAny(interp, objPtr) != JIM_OK) return -1; return ((Jim_HashTable *)objPtr->internalRep.ptr)->used; } __device__ int Jim_DictInfo(Jim_Interp *interp, Jim_Obj *objPtr) { if (SetDictFromAny(interp, objPtr) != JIM_OK) return JIM_ERROR; Jim_HashTable *ht = (Jim_HashTable *)objPtr->internalRep.ptr; // Note that this uses internal knowledge of the hash table printf("%d entries in table, %d buckets\n", ht->used, ht->size); for (unsigned int i = 0; i < ht->size; i++) { Jim_HashEntry *he = ht->table[i]; if (he) { printf("%d: ", i); while (he) { printf(" %s", Jim_String((Jim_Obj *)he->key)); he = he->next; } printf("\n"); } } return JIM_OK; } static __device__ int Jim_EvalEnsemble(Jim_Interp *interp, const char *basecmd, const char *subcmd, int argc, Jim_Obj *const *argv) { Jim_Obj *prefixObj = Jim_NewStringObj(interp, basecmd, -1); Jim_AppendString(interp, prefixObj, " ", 1); Jim_AppendString(interp, prefixObj, subcmd, -1); return Jim_EvalObjPrefix(interp, prefixObj, argc, argv); } // [dict] __constant__ static const char * const _dict_options[] = { "create", "get", "set", "unset", "exists", "keys", "size", "info", "merge", "with", "append", "lappend", "incr", "remove", "values", "for", "replace", "update", NULL }; static __device__ int Jim_DictCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { enum { OPT_CREATE, OPT_GET, OPT_SET, OPT_UNSET, OPT_EXISTS, OPT_KEYS, OPT_SIZE, OPT_INFO, OPT_MERGE, OPT_WITH, OPT_APPEND, OPT_LAPPEND, OPT_INCR, OPT_REMOVE, OPT_VALUES, OPT_FOR, OPT_REPLACE, OPT_UPDATE, }; if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "subcommand ?arguments ...?"); return JIM_ERROR; } int option; if (Jim_GetEnum(interp, argv[1], _dict_options, &option, "subcommand", JIM_ERRMSG) != JIM_OK) return JIM_ERROR; switch (option) { case OPT_GET: if (argc < 3) { Jim_WrongNumArgs(interp, 2, argv, "dictionary ?key ...?"); return JIM_ERROR; } Jim_Obj *objPtr; if (Jim_DictKeysVector(interp, argv[2], argv + 3, argc - 3, &objPtr, JIM_ERRMSG) != JIM_OK) return JIM_ERROR; Jim_SetResult(interp, objPtr); return JIM_OK; case OPT_SET: if (argc < 5) { Jim_WrongNumArgs(interp, 2, argv, "varName key ?key ...? 
value"); return JIM_ERROR; } return Jim_SetDictKeysVector(interp, argv[2], argv + 3, argc - 4, argv[argc - 1], JIM_ERRMSG); case OPT_EXISTS: if (argc < 4) { Jim_WrongNumArgs(interp, 2, argv, "dictionary key ?key ...?"); return JIM_ERROR; } else { int rc = Jim_DictKeysVector(interp, argv[2], argv + 3, argc - 3, &objPtr, JIM_ERRMSG); if (rc < 0) return JIM_ERROR; Jim_SetResultBool(interp, rc == JIM_OK); return JIM_OK; } case OPT_UNSET: if (argc < 4) { Jim_WrongNumArgs(interp, 2, argv, "varName key ?key ...?"); return JIM_ERROR; } if (Jim_SetDictKeysVector(interp, argv[2], argv + 3, argc - 3, NULL, 0) != JIM_OK) return JIM_ERROR; return JIM_OK; case OPT_KEYS: if (argc != 3 && argc != 4) { Jim_WrongNumArgs(interp, 2, argv, "dictionary ?pattern?"); return JIM_ERROR; } return Jim_DictKeys(interp, argv[2], argc == 4 ? argv[3] : NULL); case OPT_SIZE: if (argc != 3) { Jim_WrongNumArgs(interp, 2, argv, "dictionary"); return JIM_ERROR; } else if (Jim_DictSize(interp, argv[2]) < 0) return JIM_ERROR; Jim_SetResultInt(interp, Jim_DictSize(interp, argv[2])); return JIM_OK; case OPT_MERGE: if (argc == 2) return JIM_OK; if (Jim_DictSize(interp, argv[2]) < 0) return JIM_ERROR; // Handle as ensemble break; case OPT_UPDATE: // Better error message if (argc < 6 || argc % 2) argc = 2; break; case OPT_CREATE: if (argc % 2) { Jim_WrongNumArgs(interp, 2, argv, "?key value ...?"); return JIM_ERROR; } objPtr = Jim_NewDictObj(interp, argv + 2, argc - 2); Jim_SetResult(interp, objPtr); return JIM_OK; case OPT_INFO: if (argc != 3) { Jim_WrongNumArgs(interp, 2, argv, "dictionary"); return JIM_ERROR; } return Jim_DictInfo(interp, argv[2]); } // Handle command as an ensemble return Jim_EvalEnsemble(interp, "dict", _dict_options[option], argc - 2, argv + 2); } // [subst] __constant__ static const char *const _subst_options[] = { "-nobackslashes", "-nocommands", "-novariables", NULL }; static __device__ int Jim_SubstCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { enum { OPT_NOBACKSLASHES, OPT_NOCOMMANDS, OPT_NOVARIABLES }; if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "?options? 
string"); return JIM_ERROR; } int flags = JIM_SUBST_FLAG; for (int i = 1; i < (argc - 1); i++) { int option; if (Jim_GetEnum(interp, argv[i], _subst_options, &option, NULL, JIM_ERRMSG | JIM_ENUM_ABBREV) != JIM_OK) return JIM_ERROR; switch (option) { case OPT_NOBACKSLASHES: flags |= JIM_SUBST_NOESC; break; case OPT_NOCOMMANDS: flags |= JIM_SUBST_NOCMD; break; case OPT_NOVARIABLES: flags |= JIM_SUBST_NOVAR; break; } } Jim_Obj *objPtr; if (Jim_SubstObj(interp, argv[argc - 1], &objPtr, flags) != JIM_OK) return JIM_ERROR; Jim_SetResult(interp, objPtr); return JIM_OK; } // [info] __constant__ static const char *const _info_commands[] = { "body", "statics", "commands", "procs", "channels", "exists", "globals", "level", "frame", "locals", "vars", "version", "patchlevel", "complete", "args", "hostname", "script", "source", "stacktrace", "nameofexecutable", "returncodes", "references", "alias", NULL }; static __device__ int Jim_InfoCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { enum { INFO_BODY, INFO_STATICS, INFO_COMMANDS, INFO_PROCS, INFO_CHANNELS, INFO_EXISTS, INFOGLOBAL_S, INFO_LEVEL, INFO_FRAME, INFO_LOCALS, INFO_VARS, INFO_VERSION, INFO_PATCHLEVEL, INFO_COMPLETE, INFO_ARGS, INFO_HOSTNAME, INFO_SCRIPT, INFO_SOURCE, INFO_STACKTRACE, INFO_NAMEOFEXECUTABLE, INFO_RETURNCODES, INFO_REFERENCES, INFO_ALIAS, }; #ifdef jim_ext_namespace int nons = 0; if (argc > 2 && Jim_CompareStringImmediate(interp, argv[1], "-nons")) { // This is for internal use only argc--; argv++; nons = 1; } #endif if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "subcommand ?args ...?"); return JIM_ERROR; } int cmd; if (Jim_GetEnum(interp, argv[1], _info_commands, &cmd, "subcommand", JIM_ERRMSG | JIM_ENUM_ABBREV) != JIM_OK) return JIM_ERROR; // Test for the the most common commands first, just in case it makes a difference Jim_Obj *objPtr; int mode = 0; switch (cmd) { case INFO_EXISTS: if (argc != 3) { Jim_WrongNumArgs(interp, 2, argv, "varName"); return JIM_ERROR; } Jim_SetResultBool(interp, Jim_GetVariable(interp, argv[2], 0) != NULL); break; case INFO_ALIAS:{ Jim_Cmd *cmdPtr; if (argc != 3) { Jim_WrongNumArgs(interp, 2, argv, "command"); return JIM_ERROR; } if ((cmdPtr = Jim_GetCommand(interp, argv[2], JIM_ERRMSG)) == NULL) return JIM_ERROR; if (cmdPtr->isproc || cmdPtr->u.native.cmdProc != JimAliasCmd) { Jim_SetResultFormatted(interp, "command \"%#s\" is not an alias", argv[2]); return JIM_ERROR; } Jim_SetResult(interp, (Jim_Obj *)cmdPtr->u.native.privData); return JIM_OK; } case INFO_CHANNELS: mode++; // JIM_CMDLIST_CHANNELS #ifndef jim_ext_aio Jim_SetResultString(interp, "aio not enabled", -1); return JIM_ERROR; #endif case INFO_PROCS: mode++; // JIM_CMDLIST_PROCS case INFO_COMMANDS: // mode 0 => JIM_CMDLIST_COMMANDS if (argc != 2 && argc != 3) { Jim_WrongNumArgs(interp, 2, argv, "?pattern?"); return JIM_ERROR; } #ifdef jim_ext_namespace if (!nons) if (Jim_Length(interp->framePtr->nsObj) || (argc == 3 && JimGlobMatch("::*", Jim_String(argv[2]), 0))) return Jim_EvalPrefix(interp, "namespace info", argc - 1, argv + 1); #endif Jim_SetResult(interp, JimCommandsList(interp, argc == 3 ? 
argv[2] : NULL, mode)); break; case INFO_VARS: mode++; // JIM_VARLIST_VARS case INFO_LOCALS: mode++; // JIM_VARLIST_LOCALS case INFOGLOBAL_S: // mode 0 => JIM_VARLISTGLOBAL_S if (argc != 2 && argc != 3) { Jim_WrongNumArgs(interp, 2, argv, "?pattern?"); return JIM_ERROR; } #ifdef jim_ext_namespace if (!nons) if (Jim_Length(interp->framePtr->nsObj) || (argc == 3 && JimGlobMatch("::*", Jim_String(argv[2]), 0))) return Jim_EvalPrefix(interp, "namespace info", argc - 1, argv + 1); #endif Jim_SetResult(interp, JimVariablesList(interp, argc == 3 ? argv[2] : NULL, mode)); break; case INFO_SCRIPT: if (argc != 2) { Jim_WrongNumArgs(interp, 2, argv, ""); return JIM_ERROR; } Jim_SetResult(interp, JimGetScript(interp, interp->currentScriptObj)->fileNameObj); break; case INFO_SOURCE:{ if (argc != 3 && argc != 5) { Jim_WrongNumArgs(interp, 2, argv, "source ?filename line?"); return JIM_ERROR; } jim_wide line; Jim_Obj *resObjPtr; if (argc == 5) { if (Jim_GetWide(interp, argv[4], &line) != JIM_OK) return JIM_ERROR; resObjPtr = Jim_NewStringObj(interp, Jim_String(argv[2]), Jim_Length(argv[2])); JimSetSourceInfo(interp, resObjPtr, argv[3], (int)line); } else { Jim_Obj *fileNameObj; if (argv[2]->typePtr == &_sourceObjType) { fileNameObj = argv[2]->internalRep.sourceValue.fileNameObj; line = argv[2]->internalRep.sourceValue.lineNumber; } else if (argv[2]->typePtr == &_scriptObjType) { ScriptObj *script = JimGetScript(interp, argv[2]); fileNameObj = script->fileNameObj; line = script->firstline; } else { fileNameObj = interp->emptyObj; line = 1; } resObjPtr = Jim_NewListObj(interp, NULL, 0); Jim_ListAppendElement(interp, resObjPtr, fileNameObj); Jim_ListAppendElement(interp, resObjPtr, Jim_NewIntObj(interp, line)); } Jim_SetResult(interp, resObjPtr); break; } case INFO_STACKTRACE: Jim_SetResult(interp, interp->stackTrace); break; case INFO_LEVEL: case INFO_FRAME: switch (argc) { case 2: Jim_SetResultInt(interp, interp->framePtr->level); break; case 3: if (JimInfoLevel(interp, argv[2], &objPtr, cmd == INFO_LEVEL) != JIM_OK) return JIM_ERROR; Jim_SetResult(interp, objPtr); break; default: Jim_WrongNumArgs(interp, 2, argv, "?levelNum?"); return JIM_ERROR; } break; case INFO_BODY: case INFO_STATICS: case INFO_ARGS:{ if (argc != 3) { Jim_WrongNumArgs(interp, 2, argv, "procname"); return JIM_ERROR; } Jim_Cmd *cmdPtr; if ((cmdPtr = Jim_GetCommand(interp, argv[2], JIM_ERRMSG)) == NULL) return JIM_ERROR; if (!cmdPtr->isproc) { Jim_SetResultFormatted(interp, "command \"%#s\" is not a procedure", argv[2]); return JIM_ERROR; } switch (cmd) { case INFO_BODY: Jim_SetResult(interp, cmdPtr->u.proc.bodyObjPtr); break; case INFO_ARGS: Jim_SetResult(interp, cmdPtr->u.proc.argListObjPtr); break; case INFO_STATICS: if (cmdPtr->u.proc.staticVars) { int mode = JIM_VARLIST_LOCALS | JIM_VARLIST_VALUES; Jim_SetResult(interp, JimHashtablePatternMatch(interp, cmdPtr->u.proc.staticVars, NULL, JimVariablesMatch, mode)); } break; } break; } case INFO_VERSION: case INFO_PATCHLEVEL: { char buf[(JIM_INTEGER_SPACE * 2) + 1]; sprintf(buf, "%d.%d", JIM_VERSION / 100, JIM_VERSION % 100); Jim_SetResultString(interp, buf, -1); break; } case INFO_COMPLETE: if (argc != 3 && argc != 4) { Jim_WrongNumArgs(interp, 2, argv, "script ?missing?"); return JIM_ERROR; } else { int len; const char *s = Jim_GetString(argv[2], &len); char missing; Jim_SetResultBool(interp, Jim_ScriptIsComplete(s, len, &missing)); if (missing != ' ' && argc == 4) Jim_SetVariable(interp, argv[3], Jim_NewStringObj(interp, &missing, 1)); } break; case INFO_HOSTNAME: return 
Jim_Eval(interp, "os.gethostname"); // Redirect to os.gethostname if it exists case INFO_NAMEOFEXECUTABLE: return Jim_Eval(interp, "{info nameofexecutable}"); // Redirect to Tcl proc case INFO_RETURNCODES: if (argc == 2) { Jim_Obj *listObjPtr = Jim_NewListObj(interp, NULL, 0); for (int i = 0; jimReturnCodes[i]; i++) { Jim_ListAppendElement(interp, listObjPtr, Jim_NewIntObj(interp, i)); Jim_ListAppendElement(interp, listObjPtr, Jim_NewStringObj(interp, jimReturnCodes[i], -1)); } Jim_SetResult(interp, listObjPtr); } else if (argc == 3) { long code; if (Jim_GetLong(interp, argv[2], &code) != JIM_OK) return JIM_ERROR; const char *name = Jim_ReturnCode(code); if (*name == '?') Jim_SetResultInt(interp, code); else Jim_SetResultString(interp, name, -1); } else { Jim_WrongNumArgs(interp, 2, argv, "?code?"); return JIM_ERROR; } break; case INFO_REFERENCES: #ifdef JIM_REFERENCES return JimInfoReferences(interp, argc, argv); #else Jim_SetResultString(interp, "not supported", -1); return JIM_ERROR; #endif } return JIM_OK; } // [exists] __constant__ static const char *const _exists_options[] = { "-command", "-proc", "-alias", "-var", NULL }; static __device__ int Jim_ExistsCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { enum { OPT_COMMAND, OPT_PROC, OPT_ALIAS, OPT_VAR }; int option; Jim_Obj *objPtr; if (argc == 2) { option = OPT_VAR; objPtr = argv[1]; } else if (argc == 3) { if (Jim_GetEnum(interp, argv[1], _exists_options, &option, NULL, JIM_ERRMSG | JIM_ENUM_ABBREV) != JIM_OK) return JIM_ERROR; objPtr = argv[2]; } else { Jim_WrongNumArgs(interp, 1, argv, "?option? name"); return JIM_ERROR; } int result = 0; if (option == OPT_VAR) result = Jim_GetVariable(interp, objPtr, 0) != NULL; else { // Now different kinds of commands Jim_Cmd *cmd = Jim_GetCommand(interp, objPtr, JIM_NONE); if (cmd) switch (option) { case OPT_COMMAND: result = 1; break; case OPT_ALIAS: result = cmd->isproc == 0 && cmd->u.native.cmdProc == JimAliasCmd; break; case OPT_PROC: result = cmd->isproc; break; } } Jim_SetResultBool(interp, result); return JIM_OK; } // [split] static __device__ int Jim_SplitCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 2 && argc != 3) { Jim_WrongNumArgs(interp, 1, argv, "string ?splitChars?"); return JIM_ERROR; } int len; const char *str = Jim_GetString(argv[1], &len); if (len == 0) return JIM_OK; int strLen = Jim_Utf8Length(interp, argv[1]); // Init const char *splitChars; int splitLen; if (argc == 2) { splitChars = " \n\t\r"; splitLen = 4; } else { splitChars = Jim_String(argv[2]); splitLen = Jim_Utf8Length(interp, argv[2]); } const char *noMatchStart = str; Jim_Obj *resObjPtr = Jim_NewListObj(interp, NULL, 0); // Split int c; if (splitLen) { Jim_Obj *objPtr; while (strLen--) { const char *sc = splitChars; int scLen = splitLen; int sl = utf8_tounicode(str, &c); while (scLen--) { int pc; sc += utf8_tounicode(sc, &pc); if (c == pc) { objPtr = Jim_NewStringObj(interp, noMatchStart, (int)(str - noMatchStart)); Jim_ListAppendElement(interp, resObjPtr, objPtr); noMatchStart = str + sl; break; } } str += sl; } objPtr = Jim_NewStringObj(interp, noMatchStart, (int)(str - noMatchStart)); Jim_ListAppendElement(interp, resObjPtr, objPtr); } else { // This handles the special case of splitchars eq {} Optimise by sharing common (ASCII) characters Jim_Obj **commonObj = NULL; #define NUM_COMMON (128 - 9) while (strLen--) { int n = utf8_tounicode(str, &c); #ifdef JIM_OPTIMIZATION if (c >= 9 && c < 128) { // Common ASCII char. 
Note that 9 is the tab character c -= 9; if (!commonObj) { commonObj = (Jim_Obj **)Jim_Alloc(sizeof(*commonObj) * NUM_COMMON); memset(commonObj, 0, sizeof(*commonObj) * NUM_COMMON); } if (!commonObj[c]) commonObj[c] = Jim_NewStringObj(interp, str, 1); Jim_ListAppendElement(interp, resObjPtr, commonObj[c]); str++; continue; } #endif Jim_ListAppendElement(interp, resObjPtr, Jim_NewStringObjUtf8(interp, str, 1)); str += n; } Jim_Free(commonObj); } Jim_SetResult(interp, resObjPtr); return JIM_OK; } // [join] static __device__ int Jim_JoinCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 2 && argc != 3) { Jim_WrongNumArgs(interp, 1, argv, "list ?joinString?"); return JIM_ERROR; } // Init const char *joinStr; int joinStrLen; if (argc == 2) { joinStr = " "; joinStrLen = 1; } else joinStr = Jim_GetString(argv[2], &joinStrLen); Jim_SetResult(interp, Jim_ListJoin(interp, argv[1], joinStr, joinStrLen)); return JIM_OK; } // [format] static __device__ int Jim_FormatCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "formatString ?arg arg ...?"); return JIM_ERROR; } Jim_Obj *objPtr = Jim_FormatString(interp, argv[1], argc - 2, argv + 2); if (objPtr == NULL) return JIM_ERROR; Jim_SetResult(interp, objPtr); return JIM_OK; } // [scan] static __device__ int Jim_ScanCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc < 3) { Jim_WrongNumArgs(interp, 1, argv, "string format ?varName varName ...?"); return JIM_ERROR; } if (argv[2]->typePtr != &_scanFmtStringObjType) SetScanFmtFromAny(interp, argv[2]); if (FormatGetError(argv[2]) != 0) { Jim_SetResultString(interp, FormatGetError(argv[2]), -1); return JIM_ERROR; } if (argc > 3) { int maxPos = (int)FormatGetMaxPos(argv[2]); int count = (int)FormatGetCnvCount(argv[2]); if (maxPos > argc - 3) { Jim_SetResultString(interp, "\"%n$\" argument index out of range", -1); return JIM_ERROR; } else if (count > argc - 3) { Jim_SetResultString(interp, "different numbers of variable names and field specifiers", -1); return JIM_ERROR; } else if (count < argc - 3) { Jim_SetResultString(interp, "variable is not assigned by any conversion specifiers", -1); return JIM_ERROR; } } Jim_Obj *listPtr = Jim_ScanString(interp, argv[1], argv[2], JIM_ERRMSG); if (listPtr == 0) return JIM_ERROR; if (argc > 3) { int rc = JIM_OK; int count = 0; if (listPtr != 0 && listPtr != (Jim_Obj *)EOF) { int len = Jim_ListLength(interp, listPtr); if (len != 0) { Jim_Obj **outVec; int outc; JimListGetElements(interp, listPtr, &outc, &outVec); for (int i = 0; i < outc; ++i) if (Jim_Length(outVec[i]) > 0) { ++count; if (Jim_SetVariable(interp, argv[3 + i], outVec[i]) != JIM_OK) rc = JIM_ERROR; } } Jim_FreeNewObj(interp, listPtr); } else count = -1; if (rc == JIM_OK) Jim_SetResultInt(interp, count); return rc; } else { if (listPtr == (Jim_Obj *)EOF) { Jim_SetResult(interp, Jim_NewListObj(interp, 0, 0)); return JIM_OK; } Jim_SetResult(interp, listPtr); } return JIM_OK; } // [error] static __device__ int Jim_ErrorCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 2 && argc != 3) { Jim_WrongNumArgs(interp, 1, argv, "message ?stacktrace?"); return JIM_ERROR; } Jim_SetResult(interp, argv[1]); if (argc == 3) { JimSetStackTrace(interp, argv[2]); return JIM_ERROR; } interp->addStackTrace++; return JIM_ERROR; } // [lrange] static __device__ int Jim_LrangeCoreCommand(ClientData dummy, Jim_Interp *interp, int 
argc, Jim_Obj *const *argv) { if (argc != 4) { Jim_WrongNumArgs(interp, 1, argv, "list first last"); return JIM_ERROR; } Jim_Obj *objPtr; if ((objPtr = Jim_ListRange(interp, argv[1], argv[2], argv[3])) == NULL) return JIM_ERROR; Jim_SetResult(interp, objPtr); return JIM_OK; } // [lrepeat] static __device__ int Jim_LrepeatCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { long count; if (argc < 2 || Jim_GetLong(interp, argv[1], &count) != JIM_OK || count < 0) { Jim_WrongNumArgs(interp, 1, argv, "count ?value ...?"); return JIM_ERROR; } if (count == 0 || argc == 2) return JIM_OK; argc -= 2; argv += 2; Jim_Obj *objPtr = Jim_NewListObj(interp, argv, argc); while (--count) ListInsertElements(objPtr, -1, argc, argv); Jim_SetResult(interp, objPtr); return JIM_OK; } __device__ char **Jim_GetEnviron() { #if __CUDACC__ return nullptr; #else #ifdef HAVE__NSGETENVIRON return *_NSGetEnviron(); #else #ifndef NO_ENVIRON_EXTERN extern char **environ; #endif return environ; #endif #endif } __device__ void Jim_SetEnviron(char **env) { #if __CUDACC__ return; #else #ifdef HAVE__NSGETENVIRON *_NSGetEnviron() = env; #else #ifndef NO_ENVIRON_EXTERN extern char **environ; #endif environ = env; #endif #endif } // [env] static __device__ int Jim_EnvCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc == 1) { char **e = Jim_GetEnviron(); Jim_Obj *listObjPtr = Jim_NewListObj(interp, NULL, 0); for (int i = 0; e[i]; i++) { const char *equals = strchr(e[i], '='); if (equals) { Jim_ListAppendElement(interp, listObjPtr, Jim_NewStringObj(interp, e[i], (int)(equals - e[i]))); Jim_ListAppendElement(interp, listObjPtr, Jim_NewStringObj(interp, equals + 1, -1)); } } Jim_SetResult(interp, listObjPtr); return JIM_OK; } if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "varName ?default?"); return JIM_ERROR; } const char *key = Jim_String(argv[1]); const char *val = getenv(key); if (val == NULL) { if (argc < 3) { Jim_SetResultFormatted(interp, "environment variable \"%#s\" does not exist", argv[1]); return JIM_ERROR; } val = Jim_String(argv[2]); } Jim_SetResult(interp, Jim_NewStringObj(interp, val, -1)); return JIM_OK; } // [source] static __device__ int Jim_SourceCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 2) { Jim_WrongNumArgs(interp, 1, argv, "fileName"); return JIM_ERROR; } int retval = Jim_EvalFile(interp, Jim_String(argv[1])); return (retval == JIM_RETURN ? JIM_OK : retval); } // [lreverse] static __device__ int Jim_LreverseCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 2) { Jim_WrongNumArgs(interp, 1, argv, "list"); return JIM_ERROR; } int len; Jim_Obj **ele; JimListGetElements(interp, argv[1], &len, &ele); len--; Jim_Obj *revObjPtr = Jim_NewListObj(interp, NULL, 0); while (len >= 0) ListAppendElement(revObjPtr, ele[len--]); Jim_SetResult(interp, revObjPtr); return JIM_OK; } static __device__ int JimRangeLen(jim_wide start, jim_wide end, jim_wide step) { if (step == 0) return -1; if (start == end) return 0; else if (step > 0 && start > end) return -1; else if (step < 0 && end > start) return -1; jim_wide len = end - start; if (len < 0) len = -len; // abs(len) if (step < 0) step = -step; // abs(step) len = 1 + ((len - 1) / step); // We can truncate safely to INT_MAX, the range command will always return an error for a such long range because Tcl lists can't be so long. if (len > INT_MAX) len = INT_MAX; return (int)(len < 0 ? 
-1 : len); }
// [range]
static __device__ int Jim_RangeCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc < 2 || argc > 4) { Jim_WrongNumArgs(interp, 1, argv, "?start? end ?step?"); return JIM_ERROR; } jim_wide start = 0, end, step = 1; if (argc == 2) { if (Jim_GetWide(interp, argv[1], &end) != JIM_OK) return JIM_ERROR; } else { if (Jim_GetWide(interp, argv[1], &start) != JIM_OK || Jim_GetWide(interp, argv[2], &end) != JIM_OK) return JIM_ERROR; if (argc == 4 && Jim_GetWide(interp, argv[3], &step) != JIM_OK) return JIM_ERROR; } int len; if ((len = JimRangeLen(start, end, step)) == -1) { Jim_SetResultString(interp, "Invalid (infinite?) range specified", -1); return JIM_ERROR; } Jim_Obj *objPtr = Jim_NewListObj(interp, NULL, 0); for (int i = 0; i < len; i++) ListAppendElement(objPtr, Jim_NewIntObj(interp, start + i * step)); Jim_SetResult(interp, objPtr); return JIM_OK; }
// [rand]
static __device__ int Jim_RandCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc < 1 || argc > 3) { Jim_WrongNumArgs(interp, 1, argv, "?min? max"); return JIM_ERROR; } jim_wide min = 0, max = 0, len, maxMul; if (argc == 1) { max = JIM_WIDE_MAX; } else if (argc == 2) { if (Jim_GetWide(interp, argv[1], &max) != JIM_OK) return JIM_ERROR; } else if (argc == 3) { if (Jim_GetWide(interp, argv[1], &min) != JIM_OK || Jim_GetWide(interp, argv[2], &max) != JIM_OK) return JIM_ERROR; } len = max-min; if (len < 0) { Jim_SetResultString(interp, "Invalid arguments (max < min)", -1); return JIM_ERROR; }
// Reject raw random values from the biased tail of the range so that r%len below is uniform
maxMul = JIM_WIDE_MAX - (len ? (JIM_WIDE_MAX%len) : 0); while (1) { jim_wide r; JimRandomBytes(interp, &r, sizeof(jim_wide)); if (r < 0 || r >= maxMul) continue; r = (len == 0 ? 0 : r%len); Jim_SetResultInt(interp, min+r); return JIM_OK; } }
static __device__ int InterpObjCmd(ClientData clientData, Jim_Interp *interp, int argc, Jim_Obj *const args[]) { if (argc < 2) { Jim_WrongNumArgs(interp, 1, args, "SUBCOMMAND ..."); return JIM_ERROR; } return JIM_OK; }
static __device__ void InterpDeleteCmd(ClientData data, Jim_Interp *interp) { Jim_Interp *p = (Jim_Interp *)data; Jim_FreeInterp(p); }
// [interp] *added*
__constant__ static const char *const _interp_commands[] = { "create", "alias", "eval", "delete", NULL };
static __device__ int Jim_InterpCoreCommand(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv) { enum { INTERP_CREATE, INTERP_ALIAS, INTERP_EVAL, INTERP_DELETE }; if (argc < 2) { Jim_WrongNumArgs(interp, 1, argv, "subcommand ?args ...?"); return JIM_ERROR; } int cmd; if (Jim_GetEnum(interp, argv[1], _interp_commands, &cmd, "subcommand", JIM_ERRMSG | JIM_ENUM_ABBREV) != JIM_OK) return JIM_ERROR; switch (cmd) { case INTERP_CREATE: { const char *arg;
//bool safe = false;
// An optional -safe flag may precede the path; consume it so that argv[2] is always the path
if (argc == 4) { arg = Jim_String(argv[2]); if (!strcmp(arg, "-safe")) { argc--; argv++;
//safe = true;
} } if (argc < 3) { Jim_WrongNumArgs(interp, 2, argv, "CREATE ?-safe? path"); return JIM_ERROR; } Jim_Interp *p = Jim_CreateInterp(); if (!p) { Jim_SetResultString(interp, "malloc failed", -1); return JIM_ERROR; } Jim_RegisterCoreCommands(p); arg = Jim_String(argv[2]); Jim_CreateCommand(interp, arg, (Jim_CmdProc *)InterpObjCmd, (ClientData)p, InterpDeleteCmd); return JIM_OK; } case INTERP_ALIAS: { return JIM_OK; } case INTERP_EVAL: { if (argc < 3) { Jim_WrongNumArgs(interp, 2, argv, "name arg ?arg ...?"); return JIM_ERROR; } Jim_Cmd *cmdPtr; if ((cmdPtr = Jim_GetCommand(interp, argv[2], JIM_ERRMSG)) == NULL) { Jim_SetResultFormatted(interp, "Unable to find interp \"%#s\"", argv[2]); return JIM_ERROR; } if (cmdPtr->isproc) { Jim_SetResultFormatted(interp, "command \"%#s\" is a procedure", argv[2]); return JIM_ERROR; } Jim_Interp *p = (Jim_Interp *)cmdPtr->u.native.privData; int rc = Jim_EvalObj(p, argc == 4 ? argv[3] : Jim_ConcatObj(p, argc - 3, argv + 3));
// eval is "interesting", so add a stack frame here
if (rc == JIM_ERROR) { Jim_Obj *scriptObjPtr = p->currentScriptObj; ScriptObj *script = JimGetScript(p, scriptObjPtr); if (!JimScriptValid(p, script)) { Jim_DecrRefCount(p, scriptObjPtr); return JIM_ERROR; }
//interp->errorFileNameObj = p->errorFileNameObj;
//interp->errorFlag = p->errorFlag;
//interp->errorLine = p->errorLine;
//interp->errorProc = p->errorProc;
JimAddErrorToStack(interp, script); } return rc; } case INTERP_DELETE: { if (argc != 3) { Jim_WrongNumArgs(interp, 2, argv, "path"); return JIM_ERROR; } Jim_DeleteCommand(interp, Jim_String(argv[2])); return JIM_OK; } } return JIM_OK; }
__constant__ static const struct { const char *name; Jim_CmdProc *cmdProc; } Jim_CoreCommandsTable[] = { {"alias", Jim_AliasCoreCommand}, {"set", Jim_SetCoreCommand}, {"unset", Jim_UnsetCoreCommand}, {"puts", Jim_PutsCoreCommand}, {"+", Jim_AddCoreCommand}, {"*", Jim_MulCoreCommand}, {"-", Jim_SubCoreCommand}, {"/", Jim_DivCoreCommand}, {"incr", Jim_IncrCoreCommand}, {"while", Jim_WhileCoreCommand}, {"loop", Jim_LoopCoreCommand}, {"for", Jim_ForCoreCommand}, {"foreach", Jim_ForeachCoreCommand}, {"lmap", Jim_LmapCoreCommand}, {"lassign", Jim_LassignCoreCommand}, {"if", Jim_IfCoreCommand}, {"switch", Jim_SwitchCoreCommand}, {"list", Jim_ListCoreCommand}, {"lindex", Jim_LindexCoreCommand}, {"lset", Jim_LsetCoreCommand}, {"lsearch", Jim_LsearchCoreCommand}, {"llength", Jim_LlengthCoreCommand}, {"lappend", Jim_LappendCoreCommand}, {"linsert", Jim_LinsertCoreCommand}, {"lreplace", Jim_LreplaceCoreCommand}, {"lsort", Jim_LsortCoreCommand}, {"append", Jim_AppendCoreCommand}, {"debug", Jim_DebugCoreCommand}, {"eval", Jim_EvalCoreCommand}, {"uplevel", Jim_UplevelCoreCommand}, {"expr", Jim_ExprCoreCommand}, {"break", Jim_BreakCoreCommand}, {"continue", Jim_ContinueCoreCommand}, {"proc", Jim_ProcCoreCommand}, {"concat", Jim_ConcatCoreCommand}, {"return", Jim_ReturnCoreCommand}, {"upvar", Jim_UpvarCoreCommand}, {"global", JimGLOBAL_CoreCommand}, {"string", Jim_StringCoreCommand}, {"time", Jim_TimeCoreCommand}, {"exit", Jim_ExitCoreCommand}, {"catch", Jim_CatchCoreCommand},
#ifdef JIM_REFERENCES
{"ref", Jim_RefCoreCommand}, {"getref", Jim_GetrefCoreCommand}, {"setref", Jim_SetrefCoreCommand}, {"finalize", Jim_FinalizeCoreCommand}, {"collect", Jim_CollectCoreCommand},
#endif
{"rename", Jim_RenameCoreCommand}, {"dict", Jim_DictCoreCommand}, {"subst", Jim_SubstCoreCommand}, {"info", Jim_InfoCoreCommand}, {"exists", Jim_ExistsCoreCommand}, {"split", Jim_SplitCoreCommand}, {"join", Jim_JoinCoreCommand}, {"format", Jim_FormatCoreCommand}, {"scan", Jim_ScanCoreCommand}, {"error", Jim_ErrorCoreCommand}, {"lrange", Jim_LrangeCoreCommand},
{"lrepeat", Jim_LrepeatCoreCommand}, {"env", Jim_EnvCoreCommand}, {"source", Jim_SourceCoreCommand}, {"lreverse", Jim_LreverseCoreCommand}, {"range", Jim_RangeCoreCommand}, {"rand", Jim_RandCoreCommand}, {"tailcall", Jim_TailcallCoreCommand}, {"local", Jim_LocalCoreCommand}, {"upcall", Jim_UpcallCoreCommand}, {"apply", Jim_ApplyCoreCommand}, {"interp", Jim_InterpCoreCommand}, {NULL, NULL}, }; __device__ void Jim_RegisterCoreCommands(Jim_Interp *interp) { int i = 0; while (Jim_CoreCommandsTable[i].name != NULL) { Jim_CreateCommand(interp, Jim_CoreCommandsTable[i].name, Jim_CoreCommandsTable[i].cmdProc, NULL, NULL); i++; } } #pragma endregion // ----------------------------------------------------------------------------- // Interactive prompt // ----------------------------------------------------------------------------- #pragma region Interactive prompt __device__ void Jim_MakeErrorMessage(Jim_Interp *interp) { Jim_Obj *argv[2]; argv[0] = Jim_NewStringObj(interp, "errorInfo", -1); argv[1] = interp->result; Jim_EvalObjVector(interp, 2, argv); } static __device__ void JimSetFailedEnumResult(Jim_Interp *interp, const char *arg, const char *badtype, const char *prefix, const char *const *tablePtr, const char *name) { int count; for (count = 0; tablePtr[count]; count++) { } if (name == NULL) name = "option"; Jim_SetResultFormatted(interp, "%s%s \"%s\": must be ", badtype, name, arg); char **tablePtrSorted = (char **)Jim_Alloc(sizeof(char *) * count); memcpy(tablePtrSorted, tablePtr, sizeof(char *) * count); qsort(tablePtrSorted, count, sizeof(char *), qsortCompareStringPointers); for (int i = 0; i < count; i++) { if (i + 1 == count && count > 1) Jim_AppendString(interp, Jim_GetResult(interp), "or ", -1); Jim_AppendStrings(interp, Jim_GetResult(interp), prefix, tablePtrSorted[i], NULL); if (i + 1 != count) Jim_AppendString(interp, Jim_GetResult(interp), ", ", -1); } Jim_Free(tablePtrSorted); } __device__ int Jim_GetEnum(Jim_Interp *interp, Jim_Obj *objPtr, const char *const *tablePtr, int *indexPtr, const char *name, int flags) { const char *bad = "bad "; const char *const *entryPtr = NULL; int i; int arglen; const char *arg = Jim_GetString(objPtr, &arglen); int match = -1; *indexPtr = -1; for (entryPtr = tablePtr, i = 0; *entryPtr != NULL; entryPtr++, i++) { if (Jim_CompareStringImmediate(interp, objPtr, *entryPtr)) { // Found an exact match *indexPtr = i; return JIM_OK; } // Accept an unambiguous abbreviation. 
Note that '-' doesn't constitute a valid abbreviation if (flags & JIM_ENUM_ABBREV && !strncmp(arg, *entryPtr, arglen)) { if (*arg == '-' && arglen == 1) break; if (match >= 0) { bad = "ambiguous "; goto ambiguous; } match = i; } } // If we had an unambiguous partial match if (match >= 0) { *indexPtr = match; return JIM_OK; } ambiguous: if (flags & JIM_ERRMSG) JimSetFailedEnumResult(interp, arg, bad, "", tablePtr, name); return JIM_ERROR; } __device__ int Jim_GetEnumFromStruct(Jim_Interp *interp, Jim_Obj *objPtr, const void **tablePtr, int elementSize, int *indexPtr, const char *name, int flags) { const char *bad = "bad "; const void **entryPtrStruct = NULL; const char *const *entryPtr = NULL; int i; int arglen; const char *arg = Jim_GetString(objPtr, &arglen); int match = -1; *indexPtr = -1; for (entryPtrStruct = tablePtr, i = 0; *entryPtrStruct != NULL; entryPtrStruct+=elementSize, i++) { entryPtr = (const char *const *)entryPtrStruct; if (Jim_CompareStringImmediate(interp, objPtr, *entryPtr)) { // Found an exact match *indexPtr = i; return JIM_OK; } // Accept an unambiguous abbreviation. Note that '-' doesn't constitute a valid abbreviation if (flags & JIM_ENUM_ABBREV && !strncmp(arg, *entryPtr, arglen)) { if (*arg == '-' && arglen == 1) break; if (match >= 0) { bad = "ambiguous "; goto ambiguous; } match = i; } } // If we had an unambiguous partial match if (match >= 0) { *indexPtr = match; return JIM_OK; } ambiguous: if (flags & JIM_ERRMSG) JimSetFailedEnumResult(interp, arg, bad, "", (const char *const *)tablePtr, name); return JIM_ERROR; } __device__ int Jim_FindByName(const char *name, const char * const array[], size_t len) { for (int i = 0; i < (int)len; i++) if (array[i] && !strcmp(array[i], name)) return i; return -1; } __device__ int Jim_IsDict(Jim_Obj *objPtr) { return objPtr->typePtr == &_dictObjType; } __device__ int Jim_IsList(Jim_Obj *objPtr) { return objPtr->typePtr == &_listObjType; } // Very simple printf-like formatting, designed for error messages. // // The format may contain up to 5 '%s' or '%#s', corresponding to variable arguments. // The resulting string is created and set as the result. // // Each '%s' should correspond to a regular string parameter. // Each '%#s' should correspond to a (Jim_Obj *) parameter. // Any other printf specifier is not allowed (but %% is allowed for the % character). // // e.g. Jim_SetResultFormatted(interp, "Bad option \"%#s\" in proc \"%#s\"", optionObjPtr, procNamePtr); // // Note: We take advantage of the fact that printf has the same behaviour for both %s and %#s __device__ void Jim_SetResultFormatted(Jim_Interp *interp, const char *format, ...)
{ va_list va; va_start(va, format); // Initial space needed int len = strlen(format); int extra = 0; int n = 0; const char *params[5]; for (int i = 0; i < len && n < 5; i++) { int l; if (!strncmp(format + i, "%s", 2)) { params[n] = va_arg(va, char *); l = strlen(params[n]); } else if (!strncmp(format + i, "%#s", 3)) { Jim_Obj *objPtr = va_arg(va, Jim_Obj *); params[n] = Jim_GetString(objPtr, &l); } else { if (format[i] == '%') i++; continue; } n++; extra += l; } len += extra; char *buf = (char *)Jim_Alloc(len + 1); len = snprintf(buf, len + 1, format, params[0], params[1], params[2], params[3], params[4]); Jim_SetResult(interp, Jim_NewStringObjNoAlloc(interp, buf, len)); va_end(va); } #pragma endregion // ----------------------------------------------------------------------------- // CommandInfo Command *Added* // ----------------------------------------------------------------------------- #pragma region CommandInfo Command *Added* __device__ int Jim_GetCommandInfoStr(Jim_Interp *interp, const char *name, Jim_CmdInfo *cmdInfo) { Jim_Obj *nameObjPtr = Jim_NewStringObj(interp, name, -1); Jim_IncrRefCount(nameObjPtr); int ret = Jim_GetCommandInfo(interp, nameObjPtr, cmdInfo); Jim_DecrRefCount(interp, nameObjPtr); return ret; } __device__ int Jim_GetCommandInfo(Jim_Interp *interp, Jim_Obj *objPtr, Jim_CmdInfo *cmdInfo) { Jim_Cmd *cmdPtr; if ((cmdPtr = Jim_GetCommand(interp, objPtr, JIM_ERRMSG)) == NULL) return 0; if (cmdPtr->isproc) { Jim_SetResultFormatted(interp, "command \"%#s\" is a procedure", objPtr); return 0; } cmdInfo->objProc = cmdPtr->u.native.cmdProc; cmdInfo->objClientData = cmdPtr->u.native.privData; cmdInfo->deleteProc = (void *)cmdPtr->u.native.delProc; return 1; } __device__ int Jim_SetCommandInfoStr(Jim_Interp *interp, const char *name, Jim_CmdInfo *cmdInfo) { Jim_Obj *nameObjPtr = Jim_NewStringObj(interp, name, -1); Jim_IncrRefCount(nameObjPtr); int ret = Jim_SetCommandInfo(interp, nameObjPtr, cmdInfo); Jim_DecrRefCount(interp, nameObjPtr); return ret; } __device__ int Jim_SetCommandInfo(Jim_Interp *interp, Jim_Obj *objPtr, Jim_CmdInfo *cmdInfo) { Jim_Cmd *cmdPtr; if ((cmdPtr = Jim_GetCommand(interp, objPtr, JIM_ERRMSG)) == NULL) return 0; if (cmdPtr->isproc) { Jim_SetResultFormatted(interp, "command \"%#s\" is a procedure", objPtr); return 0; } cmdPtr->u.native.cmdProc = cmdInfo->objProc; cmdPtr->u.native.privData = cmdInfo->objClientData; cmdPtr->u.native.delProc = (Jim_DelCmdProc *)cmdInfo->deleteProc; return 1; } #pragma endregion // ----------------------------------------------------------------------------- // Variable Command *Added* // ----------------------------------------------------------------------------- #pragma region Variable Command *Added* __device__ Jim_Obj *Jim_GetVar2(Jim_Interp *interp, const char *name, const char *key, int flags) { Jim_CallFrame *savedFramePtr; int global = (flags & JIMGLOBAL_); if (global) { savedFramePtr = interp->framePtr; interp->framePtr = interp->topFramePtr; } Jim_Obj *nameObjPtr = Jim_NewStringObj(interp, name, -1); Jim_Obj *keyObjPtr = Jim_NewStringObj(interp, key, -1); Jim_IncrRefCount(nameObjPtr); Jim_IncrRefCount(keyObjPtr); Jim_Obj *obj; Jim_DictKeysVector(interp, nameObjPtr, &keyObjPtr, 1, &obj, flags); Jim_DecrRefCount(interp, keyObjPtr); Jim_DecrRefCount(interp, nameObjPtr); if (global) interp->framePtr = savedFramePtr; return obj; } __device__ int Jim_SetVar2(Jim_Interp *interp, const char *name, const char *key, const char *val, int flags) { Jim_CallFrame *savedFramePtr; int global = (flags & JIMGLOBAL_); if
(global) { savedFramePtr = interp->framePtr; interp->framePtr = interp->topFramePtr; } Jim_Obj *nameObjPtr = Jim_NewStringObj(interp, name, -1); Jim_Obj *keyObjPtr = Jim_NewStringObj(interp, key, -1); Jim_Obj *valObjPtr = Jim_NewStringObj(interp, val, -1); Jim_IncrRefCount(nameObjPtr); Jim_IncrRefCount(keyObjPtr); Jim_IncrRefCount(valObjPtr); int ret = Jim_SetDictKeysVector(interp, nameObjPtr, &keyObjPtr, 1, valObjPtr, 0); Jim_DecrRefCount(interp, valObjPtr); Jim_DecrRefCount(interp, keyObjPtr); Jim_DecrRefCount(interp, nameObjPtr); if (global) interp->framePtr = savedFramePtr; return ret; } #pragma endregion // stubs #ifndef jim_ext_package __device__ int Jim_PackageProvide(Jim_Interp *interp, const char *name, const char *ver, int flags) { return JIM_OK; } #endif #ifndef jim_ext_aio __device__ FILE *Jim_AioFilehandle(Jim_Interp *interp, Jim_Obj *fhObj) { Jim_SetResultString(interp, "aio not enabled", -1); return NULL; } __device__ int Jim_MakeTempFile(Jim_Interp *interp, const char *template_) { Jim_SetResultString(interp, "platform has no tempfile support", -1); return -1; } #endif #include "jim-eventloop.h" __device__ int Jim_InitStaticExtensions(Jim_Interp *interp) { extern __device__ int Jim_bootstrapInit(Jim_Interp *interp); extern __device__ int Jim_globInit(Jim_Interp *interp); extern __device__ int Jim_stdlibInit(Jim_Interp *interp); extern __device__ int Jim_tclcompatInit(Jim_Interp *interp); // extern __device__ int Jim_aioInit(Jim_Interp *interp); extern __device__ int Jim_arrayInit(Jim_Interp *interp); extern __device__ int Jim_clockInit(Jim_Interp *interp); extern __device__ int Jim_execInit(Jim_Interp *interp); extern __device__ int Jim_fileInit(Jim_Interp *interp); extern __device__ int Jim_readdirInit(Jim_Interp *interp); extern __device__ int Jim_regexpInit(Jim_Interp *interp); // #if __CUDACC__ extern __device__ int Jim_gpuInit(Jim_Interp *interp); #else extern __device__ int Jim_win32Init(Jim_Interp *interp); #endif extern __device__ int Jim_historyInit(Jim_Interp *interp); extern __device__ int Jim_loadInit(Jim_Interp *interp); extern __device__ int Jim_namespaceInit(Jim_Interp *interp); extern __device__ int Jim_packInit(Jim_Interp *interp); extern __device__ int Jim_packageInit(Jim_Interp *interp); //extern __device__ int Jim_tclprefixInit(Jim_Interp *interp); Jim_bootstrapInit(interp); Jim_globInit(interp); Jim_stdlibInit(interp); //Jim_tclcompatInit(interp); // Jim_aioInit(interp); Jim_arrayInit(interp); Jim_clockInit(interp); Jim_eventloopInit(interp); Jim_execInit(interp); Jim_fileInit(interp); Jim_readdirInit(interp); Jim_regexpInit(interp); // #if __CUDACC__ Jim_gpuInit(interp); #else //Jim_win32Init(interp); #endif #ifndef __CUDACC__ Jim_historyInit(interp); Jim_loadInit(interp); Jim_namespaceInit(interp); Jim_packInit(interp); Jim_packageInit(interp); //Jim_tclprefixInit(interp); #endif return JIM_OK; }
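// -----------------------------------------------------------------------------
// Illustrative sketch (hypothetical, not part of the jim sources): how the
// Jim_GetEnum helper above is typically consumed, following the option-table
// pattern of Jim_InterpCoreCommand. The "demo" command and its options are
// invented for this sketch; Jim_GetEnum, Jim_WrongNumArgs, Jim_SetResultString
// and the JIM_* flags all come from this file.
// -----------------------------------------------------------------------------
static __device__ int DemoObjCmd(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
    static const char * const options[] = { "start", "stop", NULL };
    enum { OPT_START, OPT_STOP };
    int option;
    if (argc < 2) {
        Jim_WrongNumArgs(interp, 1, argv, "option ?arg ...?");
        return JIM_ERROR;
    }
    // JIM_ENUM_ABBREV accepts unambiguous prefixes such as "sta"; on failure
    // Jim_GetEnum sets the "bad option ... must be ..." message itself.
    if (Jim_GetEnum(interp, argv[1], options, &option, "option", JIM_ERRMSG | JIM_ENUM_ABBREV) != JIM_OK)
        return JIM_ERROR;
    switch (option) {
    case OPT_START: Jim_SetResultString(interp, "started", -1); break;
    case OPT_STOP: Jim_SetResultString(interp, "stopped", -1); break;
    }
    return JIM_OK;
}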
ebbb230a8ffb19654cd6f9d921f783171d2cb5ac.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <Timer.hpp> #include <iostream> #include <iomanip> using LOFAR::NSTimer; using std::cout; using std::cerr; using std::endl; using std::fixed; using std::setprecision; // function to check if there are any cuda errors void cudaErrorCheck(hipError_t error){ if (error != hipSuccess) { fprintf(stderr, "cuda Error: %s\n", hipGetErrorString(error)); exit(1); } } // Device code __global__ void darkGray_kernel(const int width, const int height, const int size, const unsigned char * inputImageR, const unsigned char * inputImageG, const unsigned char * inputImageB, unsigned char * darkGrayImage) { int gridStride = blockDim.x * gridDim.x; for (int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += gridStride) { darkGrayImage[pos] = ((0.3f * (float)inputImageR[pos]) + (0.59f * (float)inputImageG[pos]) + (0.11f * (float)inputImageB[pos])) * 0.6f + 0.5f; } } // Host code void darkGray(const int width, const int height, const unsigned char * inputImage, unsigned char * darkGrayImage) { // initialize timers NSTimer kernelTime = NSTimer("kernelDarker", false, false); NSTimer allocationTime = NSTimer("allocationDarker", false, false); NSTimer initTime = NSTimer("initDarker", false, false); NSTimer copyDeviceTime = NSTimer("copyDeviceDarker", false, false); NSTimer copyHostTime = NSTimer("copyHostDarker", false, false); NSTimer freeTime = NSTimer("freeDarker", false, false); // init vars hipError_t error = hipSuccess; unsigned char *inputImageDeviceR,*inputImageDeviceG, *inputImageDeviceB, *darkGrayImageDevice; int sizeImage = width * height; // init call to setup cuda initTime.start(); hipSetDevice(0); initTime.stop(); // allocate images in device memory allocationTime.start(); error = hipMalloc(&inputImageDeviceR, sizeImage * sizeof(unsigned char)); cudaErrorCheck(error); error = hipMalloc(&inputImageDeviceG, sizeImage * sizeof(unsigned char)); cudaErrorCheck(error); error = hipMalloc(&inputImageDeviceB, sizeImage * sizeof(unsigned char)); cudaErrorCheck(error); error = hipMalloc(&darkGrayImageDevice, sizeImage * sizeof(unsigned char)); cudaErrorCheck(error); allocationTime.stop(); // Copy image from host to device copyDeviceTime.start(); error = hipMemcpy(inputImageDeviceR, inputImage, sizeImage, hipMemcpyHostToDevice); cudaErrorCheck(error); error = hipMemcpy(inputImageDeviceG, inputImage+sizeImage, sizeImage, hipMemcpyHostToDevice); cudaErrorCheck(error); error = hipMemcpy(inputImageDeviceB, inputImage+(sizeImage*2), sizeImage, hipMemcpyHostToDevice); cudaErrorCheck(error); copyDeviceTime.stop(); // number of SM's for GeForce GTX 480 int numSMs = 32; // number of threads per block for GeForce GTX 480 int threadsPerBlock = 1024; // must be a multiple of num SM's for optimal performance int numBlocks = 32*numSMs; // start the kernel kernelTime.start(); hipLaunchKernelGGL(( darkGray_kernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, width, height, sizeImage, inputImageDeviceR, inputImageDeviceG, inputImageDeviceB, darkGrayImageDevice); cudaErrorCheck(hipGetLastError()); hipDeviceSynchronize(); kernelTime.stop(); // Copy the result from device to host copyHostTime.start(); error = hipMemcpy(darkGrayImage, darkGrayImageDevice, sizeImage, hipMemcpyDeviceToHost); cudaErrorCheck(error); copyHostTime.stop(); // Free the images in the device memory freeTime.start(); hipFree(inputImageDeviceR); hipFree(inputImageDeviceG); hipFree(inputImageDeviceB); hipFree(darkGrayImageDevice); freeTime.stop(); // 
output times cout << fixed << setprecision(6) << "Initialization time: " << initTime.getElapsed() << setprecision(3) << endl; cout << fixed << setprecision(6) << "Allocation time: " << allocationTime.getElapsed() << setprecision(3) << endl; cout << fixed << setprecision(6) << "Copy to device time:" << copyDeviceTime.getElapsed() << setprecision(3) << endl; cout << fixed << setprecision(6) << "Kernel time:" << kernelTime.getElapsed() << setprecision(3) << endl; cout << fixed << setprecision(6) << "Copy to host time:" << copyHostTime.getElapsed() << setprecision(3) << endl; cout << fixed << setprecision(6) << "Free time:" << freeTime.getElapsed() << setprecision(3) << endl; cout << fixed << setprecision(6) << "GFLOP/s:" << (static_cast< long long unsigned int >(width) * height * 7) / 1000000000.0 / kernelTime.getElapsed() << endl; cout << fixed << setprecision(6) << "GB/s:" << (static_cast< long long unsigned int >(width) * height * (4 * sizeof(unsigned char))) / 1000000000.0 / kernelTime.getElapsed() << endl; }
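// -----------------------------------------------------------------------------
// Portability sketch (hypothetical, not part of the original file): numSMs and
// threadsPerBlock above are hardcoded for a GeForce GTX 480. The same
// grid-stride launch configuration can instead be derived at runtime from the
// device properties, reusing the cudaErrorCheck helper defined in this file:
// -----------------------------------------------------------------------------
static void launchConfig(int device, int size, int threadsPerBlock, int *numBlocks) {
    hipDeviceProp_t prop;
    cudaErrorCheck(hipGetDeviceProperties(&prop, device));
    // 32 blocks per SM mirrors the heuristic used above
    *numBlocks = 32 * prop.multiProcessorCount;
    // a grid-stride loop never needs more blocks than ceil(size / threadsPerBlock)
    int needed = (size + threadsPerBlock - 1) / threadsPerBlock;
    if (*numBlocks > needed) *numBlocks = needed;
}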
ebbb230a8ffb19654cd6f9d921f783171d2cb5ac.cu
#include <Timer.hpp> #include <iostream> #include <iomanip> using LOFAR::NSTimer; using std::cout; using std::cerr; using std::endl; using std::fixed; using std::setprecision; // function to check if there are any cuda errors void cudaErrorCheck(cudaError_t error){ if (error != cudaSuccess) { fprintf(stderr, "cuda Error: %s\n", cudaGetErrorString(error)); exit(1); } } // Device code __global__ void darkGray_kernel(const int width, const int height, const int size, const unsigned char * inputImageR, const unsigned char * inputImageG, const unsigned char * inputImageB, unsigned char * darkGrayImage) { int gridStride = blockDim.x * gridDim.x; for (int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += gridStride) { darkGrayImage[pos] = ((0.3f * (float)inputImageR[pos]) + (0.59f * (float)inputImageG[pos]) + (0.11f * (float)inputImageB[pos])) * 0.6f + 0.5f; } } // Host code void darkGray(const int width, const int height, const unsigned char * inputImage, unsigned char * darkGrayImage) { // initialize timers NSTimer kernelTime = NSTimer("kernelDarker", false, false); NSTimer allocationTime = NSTimer("allocationDarker", false, false); NSTimer initTime = NSTimer("initDarker", false, false); NSTimer copyDeviceTime = NSTimer("copyDeviceDarker", false, false); NSTimer copyHostTime = NSTimer("copyHostDarker", false, false); NSTimer freeTime = NSTimer("freeDarker", false, false); // init vars cudaError_t error = cudaSuccess; unsigned char *inputImageDeviceR,*inputImageDeviceG, *inputImageDeviceB, *darkGrayImageDevice; int sizeImage = width * height; // init call to setup cuda initTime.start(); cudaSetDevice(0); initTime.stop(); // allocate images in device memory allocationTime.start(); error = cudaMalloc(&inputImageDeviceR, sizeImage * sizeof(unsigned char)); cudaErrorCheck(error); error = cudaMalloc(&inputImageDeviceG, sizeImage * sizeof(unsigned char)); cudaErrorCheck(error); error = cudaMalloc(&inputImageDeviceB, sizeImage * sizeof(unsigned char)); cudaErrorCheck(error); error = cudaMalloc(&darkGrayImageDevice, sizeImage * sizeof(unsigned char)); cudaErrorCheck(error); allocationTime.stop(); // Copy image from host to device copyDeviceTime.start(); error = cudaMemcpy(inputImageDeviceR, inputImage, sizeImage, cudaMemcpyHostToDevice); cudaErrorCheck(error); error = cudaMemcpy(inputImageDeviceG, inputImage+sizeImage, sizeImage, cudaMemcpyHostToDevice); cudaErrorCheck(error); error = cudaMemcpy(inputImageDeviceB, inputImage+(sizeImage*2), sizeImage, cudaMemcpyHostToDevice); cudaErrorCheck(error); copyDeviceTime.stop(); // number of SM's for GeForce GTX 480 int numSMs = 32; // number of threads per block for GeForce GTX 480 int threadsPerBlock = 1024; // must be a multiple of num SM's for optimal performance int numBlocks = 32*numSMs; // start the kernel kernelTime.start(); darkGray_kernel<<<numBlocks, threadsPerBlock>>>(width, height, sizeImage, inputImageDeviceR, inputImageDeviceG, inputImageDeviceB, darkGrayImageDevice); cudaErrorCheck(cudaGetLastError()); cudaDeviceSynchronize(); kernelTime.stop(); // Copy the result from device to host copyHostTime.start(); error = cudaMemcpy(darkGrayImage, darkGrayImageDevice, sizeImage, cudaMemcpyDeviceToHost); cudaErrorCheck(error); copyHostTime.stop(); // Free the images in the device memory freeTime.start(); cudaFree(inputImageDeviceR); cudaFree(inputImageDeviceG); cudaFree(inputImageDeviceB); cudaFree(darkGrayImageDevice); freeTime.stop(); // output times cout << fixed << setprecision(6) << "Initialization time: " << initTime.getElapsed() <<
setprecision(3) << endl; cout << fixed << setprecision(6) << "Allocation time: " << allocationTime.getElapsed() << setprecision(3) << endl; cout << fixed << setprecision(6) << "Copy to device time:" << copyDeviceTime.getElapsed() << setprecision(3) << endl; cout << fixed << setprecision(6) << "Kernel time:" << kernelTime.getElapsed() << setprecision(3) << endl; cout << fixed << setprecision(6) << "Copy to host time:" << copyHostTime.getElapsed() << setprecision(3) << endl; cout << fixed << setprecision(6) << "Free time:" << freeTime.getElapsed() << setprecision(3) << endl; cout << fixed << setprecision(6) << "GFLOP/s:" << (static_cast< long long unsigned int >(width) * height * 7) / 1000000000.0 / kernelTime.getElapsed() << endl; cout << fixed << setprecision(6) << "GB/s:" << (static_cast< long long unsigned int >(width) * height * (4 * sizeof(unsigned char))) / 1000000000.0 / kernelTime.getElapsed() << endl; }
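// -----------------------------------------------------------------------------
// Minimal hypothetical driver (not part of the original file), shown only to
// document the planar layout the three cudaMemcpy calls above assume: the R
// plane starts at inputImage, G at inputImage + width*height, and B at
// inputImage + 2*width*height. Pixel values here are synthetic.
// -----------------------------------------------------------------------------
int main() {
    const int width = 1024, height = 768;
    const int plane = width * height;
    unsigned char *rgb = new unsigned char[3 * plane];
    unsigned char *gray = new unsigned char[plane];
    for (int i = 0; i < 3 * plane; i++) rgb[i] = (unsigned char)(i & 0xff); // synthetic pixels
    darkGray(width, height, rgb, gray);
    delete[] rgb;
    delete[] gray;
    return 0;
}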
f3b15c4ee94c96328bed826f014431b9722f36b6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "orc_gpu.hpp" #include <cudf/io/orc_types.hpp> #include <io/utilities/block_utils.cuh> #include <hipcub/hipcub.hpp> #include <rmm/cuda_stream_view.hpp> namespace cudf { namespace io { namespace orc { namespace gpu { struct comp_in_out { uint8_t const* in_ptr; size_t in_size; uint8_t* out_ptr; size_t out_size; }; struct compressed_stream_s { CompressedStreamInfo info; comp_in_out ctl; }; // blockDim {128,1,1} __global__ void __launch_bounds__(128, 8) gpuParseCompressedStripeData(CompressedStreamInfo* strm_info, int32_t num_streams, uint32_t block_size, uint32_t log2maxcr, bool allow_block_size_estimate) { __shared__ compressed_stream_s strm_g[4]; compressed_stream_s* const s = &strm_g[threadIdx.x / 32]; int strm_id = blockIdx.x * 4 + (threadIdx.x / 32); int lane_id = threadIdx.x % 32; if (strm_id < num_streams && lane_id == 0) { s->info = strm_info[strm_id]; } __syncthreads(); if (strm_id < num_streams) { // Walk through the compressed blocks const uint8_t* cur = s->info.compressed_data; const uint8_t* end = cur + s->info.compressed_data_size; uint8_t* uncompressed = s->info.uncompressed_data; size_t max_uncompressed_size = 0; uint32_t max_uncompressed_block_size = 0; uint32_t num_compressed_blocks = 0; uint32_t num_uncompressed_blocks = 0; while (cur + block_header_size < end) { uint32_t block_len = shuffle((lane_id == 0) ? cur[0] | (cur[1] << 8) | (cur[2] << 16) : 0); auto const is_uncompressed = static_cast<bool>(block_len & 1); uint32_t uncompressed_size; device_span<uint8_t const>* init_in_ctl = nullptr; device_span<uint8_t>* init_out_ctl = nullptr; block_len >>= 1; cur += block_header_size; if (block_len > block_size || cur + block_len > end) { // Fatal num_compressed_blocks = 0; max_uncompressed_size = 0; max_uncompressed_block_size = 0; break; } // TBD: For some codecs like snappy, it wouldn't be too difficult to get the actual // uncompressed size and avoid waste due to block size alignment For now, rely on the max // compression ratio to limit waste for the most extreme cases (small single-block streams) uncompressed_size = (is_uncompressed) ? block_len : allow_block_size_estimate && (block_len < (block_size >> log2maxcr)) ? block_len << log2maxcr : block_size; if (is_uncompressed) { if (uncompressed_size <= 32) { // For short blocks, copy the uncompressed data to output if (uncompressed && max_uncompressed_size + uncompressed_size <= s->info.max_uncompressed_size && lane_id < uncompressed_size) { uncompressed[max_uncompressed_size + lane_id] = cur[lane_id]; } } else { init_in_ctl = (s->info.copy_in_ctl && num_uncompressed_blocks < s->info.num_uncompressed_blocks) ? &s->info.copy_in_ctl[num_uncompressed_blocks] : nullptr; init_out_ctl = (s->info.copy_out_ctl && num_uncompressed_blocks < s->info.num_uncompressed_blocks) ? 
&s->info.copy_out_ctl[num_uncompressed_blocks] : nullptr; num_uncompressed_blocks++; } } else { init_in_ctl = (s->info.dec_in_ctl && num_compressed_blocks < s->info.num_compressed_blocks) ? &s->info.dec_in_ctl[num_compressed_blocks] : nullptr; init_out_ctl = (s->info.dec_out_ctl && num_compressed_blocks < s->info.num_compressed_blocks) ? &s->info.dec_out_ctl[num_compressed_blocks] : nullptr; num_compressed_blocks++; } if (!lane_id && init_in_ctl) { s->ctl = {cur, block_len, uncompressed + max_uncompressed_size, uncompressed_size}; } __syncwarp(); if (init_in_ctl && lane_id == 0) { *init_in_ctl = {s->ctl.in_ptr, s->ctl.in_size}; *init_out_ctl = {s->ctl.out_ptr, s->ctl.out_size}; } cur += block_len; max_uncompressed_size += uncompressed_size; max_uncompressed_block_size = max(max_uncompressed_block_size, uncompressed_size); } __syncwarp(); if (!lane_id) { s->info.num_compressed_blocks = num_compressed_blocks; s->info.num_uncompressed_blocks = num_uncompressed_blocks; s->info.max_uncompressed_size = max_uncompressed_size; s->info.max_uncompressed_block_size = max_uncompressed_block_size; } } __syncthreads(); if (strm_id < num_streams && lane_id == 0) strm_info[strm_id] = s->info; } // blockDim {128,1,1} __global__ void __launch_bounds__(128, 8) gpuPostDecompressionReassemble(CompressedStreamInfo* strm_info, int32_t num_streams) { __shared__ compressed_stream_s strm_g[4]; compressed_stream_s* const s = &strm_g[threadIdx.x / 32]; int strm_id = blockIdx.x * 4 + (threadIdx.x / 32); int lane_id = threadIdx.x % 32; if (strm_id < num_streams && lane_id == 0) s->info = strm_info[strm_id]; __syncthreads(); if (strm_id < num_streams && s->info.num_compressed_blocks + s->info.num_uncompressed_blocks > 0 && s->info.max_uncompressed_size > 0) { // Walk through the compressed blocks const uint8_t* cur = s->info.compressed_data; const uint8_t* end = cur + s->info.compressed_data_size; auto dec_out = s->info.dec_out_ctl; auto dec_result = s->info.dec_res; uint8_t* uncompressed_actual = s->info.uncompressed_data; uint8_t* uncompressed_estimated = uncompressed_actual; uint32_t num_compressed_blocks = 0; uint32_t max_compressed_blocks = s->info.num_compressed_blocks; while (cur + block_header_size < end) { uint32_t block_len = shuffle((lane_id == 0) ? cur[0] | (cur[1] << 8) | (cur[2] << 16) : 0); auto const is_uncompressed = static_cast<bool>(block_len & 1); uint32_t uncompressed_size_est, uncompressed_size_actual; block_len >>= 1; cur += block_header_size; if (cur + block_len > end) { break; } if (is_uncompressed) { uncompressed_size_est = block_len; uncompressed_size_actual = block_len; } else { if (num_compressed_blocks > max_compressed_blocks) { break; } uint32_t const dst_size = dec_out[num_compressed_blocks].size(); uncompressed_size_est = shuffle((lane_id == 0) ? dst_size : 0); uint32_t const bytes_written = dec_result[num_compressed_blocks].bytes_written; uncompressed_size_actual = shuffle((lane_id == 0) ? 
bytes_written : 0); } // In practice, this should never happen with a well-behaved writer, as we would expect the // uncompressed size to always be equal to the compression block size except for the last // block if (uncompressed_actual < uncompressed_estimated) { // warp-level memmove for (int i = lane_id; i < (int)uncompressed_size_actual; i += 32) { uncompressed_actual[i] = uncompressed_estimated[i]; } } cur += block_len; num_compressed_blocks += 1 - is_uncompressed; uncompressed_estimated += uncompressed_size_est; uncompressed_actual += uncompressed_size_actual; } // Update info with actual uncompressed size if (!lane_id) { size_t total_uncompressed_size = uncompressed_actual - s->info.uncompressed_data; // Set uncompressed size to zero if there were any errors strm_info[strm_id].max_uncompressed_size = (num_compressed_blocks == s->info.num_compressed_blocks) ? total_uncompressed_size : 0; } } } /** * @brief Shared mem state for gpuParseRowGroupIndex */ struct rowindex_state_s { ColumnDesc chunk; uint32_t rowgroup_start; uint32_t rowgroup_end; int is_compressed; uint32_t row_index_entry[3][CI_PRESENT]; // NOTE: Assumes CI_PRESENT follows CI_DATA and CI_DATA2 CompressedStreamInfo strm_info[2]; RowGroup rowgroups[128]; uint32_t compressed_offset[128][2]; }; enum row_entry_state_e { NOT_FOUND = 0, GET_LENGTH, SKIP_VARINT, SKIP_FIXEDLEN, STORE_INDEX0, STORE_INDEX1, STORE_INDEX2, }; /** * @brief Decode a single row group index entry * * @param[in,out] s row group index state * @param[in] start start position in byte stream * @param[in] end end of byte stream * @return bytes consumed */ static uint32_t __device__ ProtobufParseRowIndexEntry(rowindex_state_s* s, uint8_t const* const start, uint8_t const* const end) { constexpr uint32_t pb_rowindexentry_id = ProtofType::FIXEDLEN + 8; const uint8_t* cur = start; row_entry_state_e state = NOT_FOUND; uint32_t length = 0, strm_idx_id = s->chunk.skip_count >> 8, idx_id = 1, ci_id = CI_PRESENT, pos_end = 0; while (cur < end) { uint32_t v = 0; for (uint32_t l = 0; l <= 28; l += 7) { uint32_t c = (cur < end) ? *cur++ : 0; v |= (c & 0x7f) << l; if (c <= 0x7f) break; } switch (state) { case NOT_FOUND: if (v == pb_rowindexentry_id) { state = GET_LENGTH; } else { v &= 7; if (v == ProtofType::FIXED64) cur += 8; else if (v == ProtofType::FIXED32) cur += 4; else if (v == ProtofType::VARINT) state = SKIP_VARINT; else if (v == ProtofType::FIXEDLEN) state = SKIP_FIXEDLEN; } break; case SKIP_VARINT: state = NOT_FOUND; break; case SKIP_FIXEDLEN: cur += v; state = NOT_FOUND; break; case GET_LENGTH: if (length == 0) { length = (uint32_t)(cur + v - start); state = NOT_FOUND; // Scan for positions (same field id & low-level type as RowIndexEntry // entry) } else { pos_end = min((uint32_t)(cur + v - start), length); state = STORE_INDEX0; } break; case STORE_INDEX0: ci_id = (idx_id == (strm_idx_id & 0xff)) ? CI_DATA : (idx_id == ((strm_idx_id >> 8) & 0xff)) ? 
CI_DATA2 : CI_PRESENT; idx_id++; if (s->is_compressed) { if (ci_id < CI_PRESENT) s->row_index_entry[0][ci_id] = v; if (cur >= start + pos_end) return length; state = STORE_INDEX1; break; } else { if (ci_id < CI_PRESENT) s->row_index_entry[0][ci_id] = 0; // Fall through to STORE_INDEX1 for uncompressed (always block0) } case STORE_INDEX1: if (ci_id < CI_PRESENT) s->row_index_entry[1][ci_id] = v; if (cur >= start + pos_end) return length; state = (ci_id == CI_DATA && s->chunk.encoding_kind != DICTIONARY && s->chunk.encoding_kind != DICTIONARY_V2 && (s->chunk.type_kind == STRING || s->chunk.type_kind == BINARY || s->chunk.type_kind == VARCHAR || s->chunk.type_kind == CHAR || s->chunk.type_kind == DECIMAL || s->chunk.type_kind == FLOAT || s->chunk.type_kind == DOUBLE)) ? STORE_INDEX0 : STORE_INDEX2; break; case STORE_INDEX2: if (ci_id < CI_PRESENT) { // Boolean columns have an extra byte to indicate the position of the bit within the byte s->row_index_entry[2][ci_id] = (s->chunk.type_kind == BOOLEAN) ? (v << 3) + *cur : v; } if (ci_id == CI_PRESENT || s->chunk.type_kind == BOOLEAN) cur++; if (cur >= start + pos_end) return length; state = STORE_INDEX0; break; } } return (uint32_t)(end - start); } /** * @brief Decode row group index entries * * @param[in,out] s row group index state * @param[in] num_rowgroups Number of index entries to read */ static __device__ void gpuReadRowGroupIndexEntries(rowindex_state_s* s, int num_rowgroups) { const uint8_t* index_data = s->chunk.streams[CI_INDEX]; int index_data_len = s->chunk.strm_len[CI_INDEX]; for (int i = 0; i < num_rowgroups; i++) { s->row_index_entry[0][0] = 0; s->row_index_entry[0][1] = 0; s->row_index_entry[1][0] = 0; s->row_index_entry[1][1] = 0; s->row_index_entry[2][0] = 0; s->row_index_entry[2][1] = 0; if (index_data_len > 0) { int len = ProtobufParseRowIndexEntry(s, index_data, index_data + index_data_len); index_data += len; index_data_len = max(index_data_len - len, 0); for (int j = 0; j < 2; j++) { s->rowgroups[i].strm_offset[j] = s->row_index_entry[1][j]; s->rowgroups[i].run_pos[j] = s->row_index_entry[2][j]; s->compressed_offset[i][j] = s->row_index_entry[0][j]; } } } s->chunk.streams[CI_INDEX] = index_data; s->chunk.strm_len[CI_INDEX] = index_data_len; } /** * @brief Translate block+offset compressed position into an uncompressed offset * * @param[in,out] s row group index state * @param[in] ci_id index to convert (CI_DATA or CI_DATA2) * @param[in] num_rowgroups Number of index entries * @param[in] t thread id */ static __device__ void gpuMapRowIndexToUncompressed(rowindex_state_s* s, int ci_id, int num_rowgroups, int t) { int32_t strm_len = s->chunk.strm_len[ci_id]; if (strm_len > 0) { int32_t compressed_offset = (t < num_rowgroups) ? 
s->compressed_offset[t][ci_id] : 0; if (compressed_offset > 0) { const uint8_t* start = s->strm_info[ci_id].compressed_data; const uint8_t* cur = start; const uint8_t* end = cur + s->strm_info[ci_id].compressed_data_size; auto dec_result = s->strm_info[ci_id].dec_res.data(); uint32_t uncomp_offset = 0; for (;;) { uint32_t block_len; if (cur + block_header_size > end || cur + block_header_size >= start + compressed_offset) { break; } block_len = cur[0] | (cur[1] << 8) | (cur[2] << 16); cur += block_header_size; auto const is_uncompressed = static_cast<bool>(block_len & 1); block_len >>= 1; cur += block_len; if (cur > end) { break; } if (is_uncompressed) { uncomp_offset += block_len; } else { uncomp_offset += dec_result->bytes_written; dec_result++; } } s->rowgroups[t].strm_offset[ci_id] += uncomp_offset; } } } /** * @brief Decode index streams * * @param[out] row_groups RowGroup device array [rowgroup][column] * @param[in] strm_info List of compressed streams (or NULL if uncompressed) * @param[in] chunks ColumnDesc device array [stripe][column] * @param[in] num_columns Number of columns * @param[in] num_stripes Number of stripes * @param[in] num_rowgroups Number of row groups * @param[in] rowidx_stride Row index stride * @param[in] use_base_stride Whether to use base stride obtained from meta or use the computed * value */ // blockDim {128,1,1} __global__ void __launch_bounds__(128, 8) gpuParseRowGroupIndex(RowGroup* row_groups, CompressedStreamInfo* strm_info, ColumnDesc* chunks, uint32_t num_columns, uint32_t num_stripes, uint32_t num_rowgroups, uint32_t rowidx_stride, bool use_base_stride) { __shared__ __align__(16) rowindex_state_s state_g; rowindex_state_s* const s = &state_g; uint32_t chunk_id = blockIdx.y * num_columns + blockIdx.x; int t = threadIdx.x; if (t == 0) { s->chunk = chunks[chunk_id]; if (strm_info) { if (s->chunk.strm_len[0] > 0) s->strm_info[0] = strm_info[s->chunk.strm_id[0]]; if (s->chunk.strm_len[1] > 0) s->strm_info[1] = strm_info[s->chunk.strm_id[1]]; } uint32_t rowgroups_in_chunk = s->chunk.num_rowgroups; s->rowgroup_start = s->chunk.rowgroup_id; s->rowgroup_end = s->rowgroup_start + rowgroups_in_chunk; s->is_compressed = (strm_info != nullptr); } __syncthreads(); while (s->rowgroup_start < s->rowgroup_end) { int num_rowgroups = min(s->rowgroup_end - s->rowgroup_start, 128); int rowgroup_size4, t4, t32; s->rowgroups[t].chunk_id = chunk_id; if (t == 0) { gpuReadRowGroupIndexEntries(s, num_rowgroups); } __syncthreads(); if (s->is_compressed) { // Convert the block + blk_offset pair into a raw offset into the decompressed stream if (s->chunk.strm_len[CI_DATA] > 0) { gpuMapRowIndexToUncompressed(s, CI_DATA, num_rowgroups, t); } if (s->chunk.strm_len[CI_DATA2] > 0) { gpuMapRowIndexToUncompressed(s, CI_DATA2, num_rowgroups, t); } __syncthreads(); } rowgroup_size4 = sizeof(RowGroup) / sizeof(uint32_t); t4 = t & 3; t32 = t >> 2; for (int i = t32; i < num_rowgroups; i += 32) { auto const num_rows = (use_base_stride) ? rowidx_stride : row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].num_rows; auto const start_row = (use_base_stride) ? 
i * rowidx_stride : row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].start_row; for (int j = t4; j < rowgroup_size4; j += 4) { ((uint32_t*)&row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x])[j] = ((volatile uint32_t*)&s->rowgroups[i])[j]; } row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].num_rows = num_rows; // Updating in case of struct row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].num_child_rows = num_rows; row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].start_row = start_row; } __syncthreads(); if (t == 0) { s->rowgroup_start += num_rowgroups; } __syncthreads(); } } template <int block_size> __global__ void __launch_bounds__(block_size) gpu_reduce_pushdown_masks(device_span<orc_column_device_view const> orc_columns, device_2dspan<rowgroup_rows const> rowgroup_bounds, device_2dspan<size_type> set_counts) { using BlockReduce = hipcub::BlockReduce<size_type, block_size>; __shared__ typename BlockReduce::TempStorage temp_storage; auto const column_id = blockIdx.x; auto const rowgroup_id = blockIdx.y; auto const column = orc_columns[column_id]; auto const t = threadIdx.x; auto const use_child_rg = column.type().id() == type_id::LIST; auto const rg = rowgroup_bounds[rowgroup_id][column_id + (use_child_rg ? 1 : 0)]; if (column.pushdown_mask == nullptr) { // All elements are valid if the null mask is not present if (t == 0) { set_counts[rowgroup_id][column_id] = rg.size(); } return; }; size_type count = 0; static constexpr size_type bits_per_word = sizeof(bitmask_type) * 8; for (auto row = t * bits_per_word + rg.begin; row < rg.end; row += block_size * bits_per_word) { auto const begin_bit = row; auto const end_bit = min(static_cast<size_type>(row + bits_per_word), rg.end); auto const mask_len = end_bit - begin_bit; auto const mask_word = cudf::detail::get_mask_offset_word(column.pushdown_mask, 0, row, end_bit) & ((1 << mask_len) - 1); count += __popc(mask_word); } count = BlockReduce(temp_storage).Sum(count); if (t == 0) { set_counts[rowgroup_id][column_id] = count; } } void __host__ ParseCompressedStripeData(CompressedStreamInfo* strm_info, int32_t num_streams, uint32_t compression_block_size, uint32_t log2maxcr, bool allow_block_size_estimate, rmm::cuda_stream_view stream) { dim3 dim_block(128, 1); dim3 dim_grid((num_streams + 3) >> 2, 1); // 1 stream per warp, 4 warps per block hipLaunchKernelGGL(( gpuParseCompressedStripeData), dim3(dim_grid), dim3(dim_block), 0, stream.value(), strm_info, num_streams, compression_block_size, log2maxcr, allow_block_size_estimate); } void __host__ PostDecompressionReassemble(CompressedStreamInfo* strm_info, int32_t num_streams, rmm::cuda_stream_view stream) { dim3 dim_block(128, 1); dim3 dim_grid((num_streams + 3) >> 2, 1); // 1 stream per warp, 4 warps per block hipLaunchKernelGGL(( gpuPostDecompressionReassemble), dim3(dim_grid), dim3(dim_block), 0, stream.value(), strm_info, num_streams); } void __host__ ParseRowGroupIndex(RowGroup* row_groups, CompressedStreamInfo* strm_info, ColumnDesc* chunks, uint32_t num_columns, uint32_t num_stripes, uint32_t num_rowgroups, uint32_t rowidx_stride, bool use_base_stride, rmm::cuda_stream_view stream) { dim3 dim_block(128, 1); dim3 dim_grid(num_columns, num_stripes); // 1 column chunk per block hipLaunchKernelGGL(( gpuParseRowGroupIndex), dim3(dim_grid), dim3(dim_block), 0, stream.value(), row_groups, strm_info, chunks, num_columns, num_stripes, num_rowgroups, rowidx_stride, use_base_stride); } void __host__ 
reduce_pushdown_masks(device_span<orc_column_device_view const> columns, device_2dspan<rowgroup_rows const> rowgroups, device_2dspan<cudf::size_type> valid_counts, rmm::cuda_stream_view stream) { dim3 dim_block(128, 1); dim3 dim_grid(columns.size(), rowgroups.size().first); // 1 rowgroup per block hipLaunchKernelGGL(( gpu_reduce_pushdown_masks<128>) , dim3(dim_grid), dim3(dim_block), 0, stream.value(), columns, rowgroups, valid_counts); } } // namespace gpu } // namespace orc } // namespace io } // namespace cudf
f3b15c4ee94c96328bed826f014431b9722f36b6.cu
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "orc_gpu.hpp" #include <cudf/io/orc_types.hpp> #include <io/utilities/block_utils.cuh> #include <cub/cub.cuh> #include <rmm/cuda_stream_view.hpp> namespace cudf { namespace io { namespace orc { namespace gpu { struct comp_in_out { uint8_t const* in_ptr; size_t in_size; uint8_t* out_ptr; size_t out_size; }; struct compressed_stream_s { CompressedStreamInfo info; comp_in_out ctl; }; // blockDim {128,1,1} __global__ void __launch_bounds__(128, 8) gpuParseCompressedStripeData(CompressedStreamInfo* strm_info, int32_t num_streams, uint32_t block_size, uint32_t log2maxcr, bool allow_block_size_estimate) { __shared__ compressed_stream_s strm_g[4]; compressed_stream_s* const s = &strm_g[threadIdx.x / 32]; int strm_id = blockIdx.x * 4 + (threadIdx.x / 32); int lane_id = threadIdx.x % 32; if (strm_id < num_streams && lane_id == 0) { s->info = strm_info[strm_id]; } __syncthreads(); if (strm_id < num_streams) { // Walk through the compressed blocks const uint8_t* cur = s->info.compressed_data; const uint8_t* end = cur + s->info.compressed_data_size; uint8_t* uncompressed = s->info.uncompressed_data; size_t max_uncompressed_size = 0; uint32_t max_uncompressed_block_size = 0; uint32_t num_compressed_blocks = 0; uint32_t num_uncompressed_blocks = 0; while (cur + block_header_size < end) { uint32_t block_len = shuffle((lane_id == 0) ? cur[0] | (cur[1] << 8) | (cur[2] << 16) : 0); auto const is_uncompressed = static_cast<bool>(block_len & 1); uint32_t uncompressed_size; device_span<uint8_t const>* init_in_ctl = nullptr; device_span<uint8_t>* init_out_ctl = nullptr; block_len >>= 1; cur += block_header_size; if (block_len > block_size || cur + block_len > end) { // Fatal num_compressed_blocks = 0; max_uncompressed_size = 0; max_uncompressed_block_size = 0; break; } // TBD: For some codecs like snappy, it wouldn't be too difficult to get the actual // uncompressed size and avoid waste due to block size alignment For now, rely on the max // compression ratio to limit waste for the most extreme cases (small single-block streams) uncompressed_size = (is_uncompressed) ? block_len : allow_block_size_estimate && (block_len < (block_size >> log2maxcr)) ? block_len << log2maxcr : block_size; if (is_uncompressed) { if (uncompressed_size <= 32) { // For short blocks, copy the uncompressed data to output if (uncompressed && max_uncompressed_size + uncompressed_size <= s->info.max_uncompressed_size && lane_id < uncompressed_size) { uncompressed[max_uncompressed_size + lane_id] = cur[lane_id]; } } else { init_in_ctl = (s->info.copy_in_ctl && num_uncompressed_blocks < s->info.num_uncompressed_blocks) ? &s->info.copy_in_ctl[num_uncompressed_blocks] : nullptr; init_out_ctl = (s->info.copy_out_ctl && num_uncompressed_blocks < s->info.num_uncompressed_blocks) ? 
&s->info.copy_out_ctl[num_uncompressed_blocks] : nullptr; num_uncompressed_blocks++; } } else { init_in_ctl = (s->info.dec_in_ctl && num_compressed_blocks < s->info.num_compressed_blocks) ? &s->info.dec_in_ctl[num_compressed_blocks] : nullptr; init_out_ctl = (s->info.dec_out_ctl && num_compressed_blocks < s->info.num_compressed_blocks) ? &s->info.dec_out_ctl[num_compressed_blocks] : nullptr; num_compressed_blocks++; } if (!lane_id && init_in_ctl) { s->ctl = {cur, block_len, uncompressed + max_uncompressed_size, uncompressed_size}; } __syncwarp(); if (init_in_ctl && lane_id == 0) { *init_in_ctl = {s->ctl.in_ptr, s->ctl.in_size}; *init_out_ctl = {s->ctl.out_ptr, s->ctl.out_size}; } cur += block_len; max_uncompressed_size += uncompressed_size; max_uncompressed_block_size = max(max_uncompressed_block_size, uncompressed_size); } __syncwarp(); if (!lane_id) { s->info.num_compressed_blocks = num_compressed_blocks; s->info.num_uncompressed_blocks = num_uncompressed_blocks; s->info.max_uncompressed_size = max_uncompressed_size; s->info.max_uncompressed_block_size = max_uncompressed_block_size; } } __syncthreads(); if (strm_id < num_streams && lane_id == 0) strm_info[strm_id] = s->info; } // blockDim {128,1,1} __global__ void __launch_bounds__(128, 8) gpuPostDecompressionReassemble(CompressedStreamInfo* strm_info, int32_t num_streams) { __shared__ compressed_stream_s strm_g[4]; compressed_stream_s* const s = &strm_g[threadIdx.x / 32]; int strm_id = blockIdx.x * 4 + (threadIdx.x / 32); int lane_id = threadIdx.x % 32; if (strm_id < num_streams && lane_id == 0) s->info = strm_info[strm_id]; __syncthreads(); if (strm_id < num_streams && s->info.num_compressed_blocks + s->info.num_uncompressed_blocks > 0 && s->info.max_uncompressed_size > 0) { // Walk through the compressed blocks const uint8_t* cur = s->info.compressed_data; const uint8_t* end = cur + s->info.compressed_data_size; auto dec_out = s->info.dec_out_ctl; auto dec_result = s->info.dec_res; uint8_t* uncompressed_actual = s->info.uncompressed_data; uint8_t* uncompressed_estimated = uncompressed_actual; uint32_t num_compressed_blocks = 0; uint32_t max_compressed_blocks = s->info.num_compressed_blocks; while (cur + block_header_size < end) { uint32_t block_len = shuffle((lane_id == 0) ? cur[0] | (cur[1] << 8) | (cur[2] << 16) : 0); auto const is_uncompressed = static_cast<bool>(block_len & 1); uint32_t uncompressed_size_est, uncompressed_size_actual; block_len >>= 1; cur += block_header_size; if (cur + block_len > end) { break; } if (is_uncompressed) { uncompressed_size_est = block_len; uncompressed_size_actual = block_len; } else { if (num_compressed_blocks > max_compressed_blocks) { break; } uint32_t const dst_size = dec_out[num_compressed_blocks].size(); uncompressed_size_est = shuffle((lane_id == 0) ? dst_size : 0); uint32_t const bytes_written = dec_result[num_compressed_blocks].bytes_written; uncompressed_size_actual = shuffle((lane_id == 0) ? 
bytes_written : 0); } // In practice, this should never happen with a well-behaved writer, as we would expect the // uncompressed size to always be equal to the compression block size except for the last // block if (uncompressed_actual < uncompressed_estimated) { // warp-level memmove for (int i = lane_id; i < (int)uncompressed_size_actual; i += 32) { uncompressed_actual[i] = uncompressed_estimated[i]; } } cur += block_len; num_compressed_blocks += 1 - is_uncompressed; uncompressed_estimated += uncompressed_size_est; uncompressed_actual += uncompressed_size_actual; } // Update info with actual uncompressed size if (!lane_id) { size_t total_uncompressed_size = uncompressed_actual - s->info.uncompressed_data; // Set uncompressed size to zero if there were any errors strm_info[strm_id].max_uncompressed_size = (num_compressed_blocks == s->info.num_compressed_blocks) ? total_uncompressed_size : 0; } } } /** * @brief Shared mem state for gpuParseRowGroupIndex */ struct rowindex_state_s { ColumnDesc chunk; uint32_t rowgroup_start; uint32_t rowgroup_end; int is_compressed; uint32_t row_index_entry[3][CI_PRESENT]; // NOTE: Assumes CI_PRESENT follows CI_DATA and CI_DATA2 CompressedStreamInfo strm_info[2]; RowGroup rowgroups[128]; uint32_t compressed_offset[128][2]; }; enum row_entry_state_e { NOT_FOUND = 0, GET_LENGTH, SKIP_VARINT, SKIP_FIXEDLEN, STORE_INDEX0, STORE_INDEX1, STORE_INDEX2, }; /** * @brief Decode a single row group index entry * * @param[in,out] s row group index state * @param[in] start start position in byte stream * @param[in] end end of byte stream * @return bytes consumed */ static uint32_t __device__ ProtobufParseRowIndexEntry(rowindex_state_s* s, uint8_t const* const start, uint8_t const* const end) { constexpr uint32_t pb_rowindexentry_id = ProtofType::FIXEDLEN + 8; const uint8_t* cur = start; row_entry_state_e state = NOT_FOUND; uint32_t length = 0, strm_idx_id = s->chunk.skip_count >> 8, idx_id = 1, ci_id = CI_PRESENT, pos_end = 0; while (cur < end) { uint32_t v = 0; for (uint32_t l = 0; l <= 28; l += 7) { uint32_t c = (cur < end) ? *cur++ : 0; v |= (c & 0x7f) << l; if (c <= 0x7f) break; } switch (state) { case NOT_FOUND: if (v == pb_rowindexentry_id) { state = GET_LENGTH; } else { v &= 7; if (v == ProtofType::FIXED64) cur += 8; else if (v == ProtofType::FIXED32) cur += 4; else if (v == ProtofType::VARINT) state = SKIP_VARINT; else if (v == ProtofType::FIXEDLEN) state = SKIP_FIXEDLEN; } break; case SKIP_VARINT: state = NOT_FOUND; break; case SKIP_FIXEDLEN: cur += v; state = NOT_FOUND; break; case GET_LENGTH: if (length == 0) { length = (uint32_t)(cur + v - start); state = NOT_FOUND; // Scan for positions (same field id & low-level type as RowIndexEntry // entry) } else { pos_end = min((uint32_t)(cur + v - start), length); state = STORE_INDEX0; } break; case STORE_INDEX0: ci_id = (idx_id == (strm_idx_id & 0xff)) ? CI_DATA : (idx_id == ((strm_idx_id >> 8) & 0xff)) ? 
CI_DATA2 : CI_PRESENT; idx_id++; if (s->is_compressed) { if (ci_id < CI_PRESENT) s->row_index_entry[0][ci_id] = v; if (cur >= start + pos_end) return length; state = STORE_INDEX1; break; } else { if (ci_id < CI_PRESENT) s->row_index_entry[0][ci_id] = 0; // Fall through to STORE_INDEX1 for uncompressed (always block0) } case STORE_INDEX1: if (ci_id < CI_PRESENT) s->row_index_entry[1][ci_id] = v; if (cur >= start + pos_end) return length; state = (ci_id == CI_DATA && s->chunk.encoding_kind != DICTIONARY && s->chunk.encoding_kind != DICTIONARY_V2 && (s->chunk.type_kind == STRING || s->chunk.type_kind == BINARY || s->chunk.type_kind == VARCHAR || s->chunk.type_kind == CHAR || s->chunk.type_kind == DECIMAL || s->chunk.type_kind == FLOAT || s->chunk.type_kind == DOUBLE)) ? STORE_INDEX0 : STORE_INDEX2; break; case STORE_INDEX2: if (ci_id < CI_PRESENT) { // Boolean columns have an extra byte to indicate the position of the bit within the byte s->row_index_entry[2][ci_id] = (s->chunk.type_kind == BOOLEAN) ? (v << 3) + *cur : v; } if (ci_id == CI_PRESENT || s->chunk.type_kind == BOOLEAN) cur++; if (cur >= start + pos_end) return length; state = STORE_INDEX0; break; } } return (uint32_t)(end - start); } /** * @brief Decode row group index entries * * @param[in,out] s row group index state * @param[in] num_rowgroups Number of index entries to read */ static __device__ void gpuReadRowGroupIndexEntries(rowindex_state_s* s, int num_rowgroups) { const uint8_t* index_data = s->chunk.streams[CI_INDEX]; int index_data_len = s->chunk.strm_len[CI_INDEX]; for (int i = 0; i < num_rowgroups; i++) { s->row_index_entry[0][0] = 0; s->row_index_entry[0][1] = 0; s->row_index_entry[1][0] = 0; s->row_index_entry[1][1] = 0; s->row_index_entry[2][0] = 0; s->row_index_entry[2][1] = 0; if (index_data_len > 0) { int len = ProtobufParseRowIndexEntry(s, index_data, index_data + index_data_len); index_data += len; index_data_len = max(index_data_len - len, 0); for (int j = 0; j < 2; j++) { s->rowgroups[i].strm_offset[j] = s->row_index_entry[1][j]; s->rowgroups[i].run_pos[j] = s->row_index_entry[2][j]; s->compressed_offset[i][j] = s->row_index_entry[0][j]; } } } s->chunk.streams[CI_INDEX] = index_data; s->chunk.strm_len[CI_INDEX] = index_data_len; } /** * @brief Translate block+offset compressed position into an uncompressed offset * * @param[in,out] s row group index state * @param[in] ci_id index to convert (CI_DATA or CI_DATA2) * @param[in] num_rowgroups Number of index entries * @param[in] t thread id */ static __device__ void gpuMapRowIndexToUncompressed(rowindex_state_s* s, int ci_id, int num_rowgroups, int t) { int32_t strm_len = s->chunk.strm_len[ci_id]; if (strm_len > 0) { int32_t compressed_offset = (t < num_rowgroups) ? 
s->compressed_offset[t][ci_id] : 0; if (compressed_offset > 0) { const uint8_t* start = s->strm_info[ci_id].compressed_data; const uint8_t* cur = start; const uint8_t* end = cur + s->strm_info[ci_id].compressed_data_size; auto dec_result = s->strm_info[ci_id].dec_res.data(); uint32_t uncomp_offset = 0; for (;;) { uint32_t block_len; if (cur + block_header_size > end || cur + block_header_size >= start + compressed_offset) { break; } block_len = cur[0] | (cur[1] << 8) | (cur[2] << 16); cur += block_header_size; auto const is_uncompressed = static_cast<bool>(block_len & 1); block_len >>= 1; cur += block_len; if (cur > end) { break; } if (is_uncompressed) { uncomp_offset += block_len; } else { uncomp_offset += dec_result->bytes_written; dec_result++; } } s->rowgroups[t].strm_offset[ci_id] += uncomp_offset; } } } /** * @brief Decode index streams * * @param[out] row_groups RowGroup device array [rowgroup][column] * @param[in] strm_info List of compressed streams (or NULL if uncompressed) * @param[in] chunks ColumnDesc device array [stripe][column] * @param[in] num_columns Number of columns * @param[in] num_stripes Number of stripes * @param[in] num_rowgroups Number of row groups * @param[in] rowidx_stride Row index stride * @param[in] use_base_stride Whether to use base stride obtained from meta or use the computed * value */ // blockDim {128,1,1} __global__ void __launch_bounds__(128, 8) gpuParseRowGroupIndex(RowGroup* row_groups, CompressedStreamInfo* strm_info, ColumnDesc* chunks, uint32_t num_columns, uint32_t num_stripes, uint32_t num_rowgroups, uint32_t rowidx_stride, bool use_base_stride) { __shared__ __align__(16) rowindex_state_s state_g; rowindex_state_s* const s = &state_g; uint32_t chunk_id = blockIdx.y * num_columns + blockIdx.x; int t = threadIdx.x; if (t == 0) { s->chunk = chunks[chunk_id]; if (strm_info) { if (s->chunk.strm_len[0] > 0) s->strm_info[0] = strm_info[s->chunk.strm_id[0]]; if (s->chunk.strm_len[1] > 0) s->strm_info[1] = strm_info[s->chunk.strm_id[1]]; } uint32_t rowgroups_in_chunk = s->chunk.num_rowgroups; s->rowgroup_start = s->chunk.rowgroup_id; s->rowgroup_end = s->rowgroup_start + rowgroups_in_chunk; s->is_compressed = (strm_info != nullptr); } __syncthreads(); while (s->rowgroup_start < s->rowgroup_end) { int num_rowgroups = min(s->rowgroup_end - s->rowgroup_start, 128); int rowgroup_size4, t4, t32; s->rowgroups[t].chunk_id = chunk_id; if (t == 0) { gpuReadRowGroupIndexEntries(s, num_rowgroups); } __syncthreads(); if (s->is_compressed) { // Convert the block + blk_offset pair into a raw offset into the decompressed stream if (s->chunk.strm_len[CI_DATA] > 0) { gpuMapRowIndexToUncompressed(s, CI_DATA, num_rowgroups, t); } if (s->chunk.strm_len[CI_DATA2] > 0) { gpuMapRowIndexToUncompressed(s, CI_DATA2, num_rowgroups, t); } __syncthreads(); } rowgroup_size4 = sizeof(RowGroup) / sizeof(uint32_t); t4 = t & 3; t32 = t >> 2; for (int i = t32; i < num_rowgroups; i += 32) { auto const num_rows = (use_base_stride) ? rowidx_stride : row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].num_rows; auto const start_row = (use_base_stride) ? 
i * rowidx_stride : row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].start_row; for (int j = t4; j < rowgroup_size4; j += 4) { ((uint32_t*)&row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x])[j] = ((volatile uint32_t*)&s->rowgroups[i])[j]; } row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].num_rows = num_rows; // Updating in case of struct row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].num_child_rows = num_rows; row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].start_row = start_row; } __syncthreads(); if (t == 0) { s->rowgroup_start += num_rowgroups; } __syncthreads(); } } template <int block_size> __global__ void __launch_bounds__(block_size) gpu_reduce_pushdown_masks(device_span<orc_column_device_view const> orc_columns, device_2dspan<rowgroup_rows const> rowgroup_bounds, device_2dspan<size_type> set_counts) { using BlockReduce = cub::BlockReduce<size_type, block_size>; __shared__ typename BlockReduce::TempStorage temp_storage; auto const column_id = blockIdx.x; auto const rowgroup_id = blockIdx.y; auto const column = orc_columns[column_id]; auto const t = threadIdx.x; auto const use_child_rg = column.type().id() == type_id::LIST; auto const rg = rowgroup_bounds[rowgroup_id][column_id + (use_child_rg ? 1 : 0)]; if (column.pushdown_mask == nullptr) { // All elements are valid if the null mask is not present if (t == 0) { set_counts[rowgroup_id][column_id] = rg.size(); } return; }; size_type count = 0; static constexpr size_type bits_per_word = sizeof(bitmask_type) * 8; for (auto row = t * bits_per_word + rg.begin; row < rg.end; row += block_size * bits_per_word) { auto const begin_bit = row; auto const end_bit = min(static_cast<size_type>(row + bits_per_word), rg.end); auto const mask_len = end_bit - begin_bit; auto const mask_word = cudf::detail::get_mask_offset_word(column.pushdown_mask, 0, row, end_bit) & ((1 << mask_len) - 1); count += __popc(mask_word); } count = BlockReduce(temp_storage).Sum(count); if (t == 0) { set_counts[rowgroup_id][column_id] = count; } } void __host__ ParseCompressedStripeData(CompressedStreamInfo* strm_info, int32_t num_streams, uint32_t compression_block_size, uint32_t log2maxcr, bool allow_block_size_estimate, rmm::cuda_stream_view stream) { dim3 dim_block(128, 1); dim3 dim_grid((num_streams + 3) >> 2, 1); // 1 stream per warp, 4 warps per block gpuParseCompressedStripeData<<<dim_grid, dim_block, 0, stream.value()>>>( strm_info, num_streams, compression_block_size, log2maxcr, allow_block_size_estimate); } void __host__ PostDecompressionReassemble(CompressedStreamInfo* strm_info, int32_t num_streams, rmm::cuda_stream_view stream) { dim3 dim_block(128, 1); dim3 dim_grid((num_streams + 3) >> 2, 1); // 1 stream per warp, 4 warps per block gpuPostDecompressionReassemble<<<dim_grid, dim_block, 0, stream.value()>>>(strm_info, num_streams); } void __host__ ParseRowGroupIndex(RowGroup* row_groups, CompressedStreamInfo* strm_info, ColumnDesc* chunks, uint32_t num_columns, uint32_t num_stripes, uint32_t num_rowgroups, uint32_t rowidx_stride, bool use_base_stride, rmm::cuda_stream_view stream) { dim3 dim_block(128, 1); dim3 dim_grid(num_columns, num_stripes); // 1 column chunk per block gpuParseRowGroupIndex<<<dim_grid, dim_block, 0, stream.value()>>>(row_groups, strm_info, chunks, num_columns, num_stripes, num_rowgroups, rowidx_stride, use_base_stride); } void __host__ reduce_pushdown_masks(device_span<orc_column_device_view const> columns, device_2dspan<rowgroup_rows const> rowgroups, 
device_2dspan<cudf::size_type> valid_counts, rmm::cuda_stream_view stream) { dim3 dim_block(128, 1); dim3 dim_grid(columns.size(), rowgroups.size().first); // 1 rowgroup per block gpu_reduce_pushdown_masks<128> <<<dim_grid, dim_block, 0, stream.value()>>>(columns, rowgroups, valid_counts); } } // namespace gpu } // namespace orc } // namespace io } // namespace cudf
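// -----------------------------------------------------------------------------
// Illustrative aside (a standalone sketch, not part of the cudf sources): both
// kernels above parse the 3-byte ORC compression block header inline. The
// header is little-endian; bit 0 flags an uncompressed ("original") block and
// the remaining 23 bits carry the block length. A worked example:
// -----------------------------------------------------------------------------
#include <cstdint>
#include <cstdio>
int main() {
  const uint8_t hdr[3] = {0x0b, 0x00, 0x00};
  uint32_t raw = hdr[0] | (hdr[1] << 8) | (hdr[2] << 16);   // raw = 0x00000b
  bool is_original = (raw & 1) != 0;                        // 0x0b & 1 -> true
  uint32_t block_len = raw >> 1;                            // 0x0b >> 1 -> 5
  printf("len=%u original=%d\n", block_len, (int)is_original); // len=5 original=1
  return 0;
}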
825aa1f0ce1bc073ada37ee66327917049e85859.hip
// !!! This is a file automatically generated by hipify!!! //#define MAIN_PROGRAM // c standard headers #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <assert.h> #include <cblas.h> #include <float.h> #include <sys/time.h> // own c headers #include "../common.h" #include "../matrix_operations/tensor.h" #include "../matrix_operations/matrix_operator.h" #include "../matrix_operations/matrix_operator_gpu.h" #include "../matrix_operations/test_matrix_operator.h" // cublas headers #include "rocblas.h" #include <hip/hip_runtime.h> #include <cblas.h> int main(int argc, char **argv) { // set up device int dev = 0; hipDeviceProp_t deviceProp; CHECK(hipGetDeviceProperties(&deviceProp, dev)); printf("\nTiming Matrix Multiplication at "); printf("device %d: %s \n\n", dev, deviceProp.name); CHECK(hipSetDevice(dev)); printf("Performs the following timings:\n\n - matMul on HOST and matMul_gpu1, matMul_gpu2, matMul_gpu_dsm, matMul_gpu_dsm_coa, matMul_cublas, matMul_gpu_sm_tr, matMul_gpu_sm_tr_ind on Device with and without copying\n \n"); printf("\n_________________________________________________\n"); // GPU Functions srand(seconds()); // Initialization, should only be called once. double start,t,t1,t2,t3,t4,t5,t6,t7,t8,t9; FILE *fp = fopen("../analysis/matMulTimes.txt", "w"); fprintf(fp,"N\tT_B\tMM1\tMM2\tDSM\tDSM_COA\tcuBlas\tSM\tSM_tr\tCPU\tSM_trInd\n"); printf("N\tT_B\tMM1\tMM2\tDSM\tDSM_COA\tcuBlas\tSM\tSM_tr\tCPU\tSM_trInd\n"); int threads_block; //maximum shift for maximum dimension size int max_shift=11; // loop over dimension sizes for(int i=0;i<=max_shift;i++){ // set times on max for every size t1=t2=t3=t4=t5=t6=t7=t8=t9=DBL_MAX; // set up dimensions and arrays int N=1<<i; int dimsA[2]={N,N}; int dimsB[2]={N,N}; int A_nelem=dimsA[0]*dimsA[1]; int B_nelem=dimsB[0]*dimsB[1]; int C_nelem=dimsA[0]*dimsB[1]; double *A = (double *)malloc(A_nelem*sizeof(double)); double *B = (double *)malloc(B_nelem*sizeof(double)); double *C = (double *)malloc(C_nelem*sizeof(double)); // set sqrt(threads_block) for(int k=8;k<=32;k*=2){ threads_block=k*k; // best of 3 for (int j=0;j<3;j++){ create_random_matrix(A,A_nelem,0,10); create_random_matrix(B,B_nelem,0,10); start=seconds(); matMul_gpu1(A, B, dimsA[0],dimsB[1],dimsA[1],C,threads_block); t=seconds()-start; t1=(t<t1) ? t : t1 ; start=seconds(); matMul_gpu2(A, B, dimsA[0],dimsB[1],dimsA[1],C,threads_block); t=seconds()-start; t2=(t<t2) ? t : t2 ; start=seconds(); matMul_gpu_dsm(A, B, dimsA[0],dimsB[1],dimsA[1],C,threads_block); t=seconds()-start; t3=(t<t3) ? t : t3 ; start=seconds(); matMul_gpu_sm(A, B, dimsA[0],dimsB[1],dimsA[1],C); t=seconds()-start; t6=(t<t6) ? t : t6 ; start=seconds(); matMul_gpu_dsm_coa(A, B, dimsA[0],dimsB[1],dimsA[1],C,threads_block); t=seconds()-start; t4=(t<t4) ? t : t4 ; start=seconds(); matMul_cublas(A, B, dimsA[0],dimsB[1],dimsA[1],C,threads_block); t=seconds()-start; t5=(t<t5) ? t : t5 ; start=seconds(); matMul_gpu_sm_tr(A, B, NORMAL,NORMAL,dimsA[0],dimsA[1],dimsB[0],dimsB[1],C); t=seconds()-start; t7=(t<t7) ? t : t7 ; if (i<=9){ start=seconds(); matMul(A, B, dimsA[0],dimsB[1],dimsA[1],C); t=seconds()-start; t8=(t<t8) ? t : t8 ; } start=seconds(); matMul_gpu_sm_tr_ind(A, B, NORMAL,NORMAL,dimsA[0],dimsA[1],dimsB[0],dimsB[1],C); t=seconds()-start; t9=(t<t9) ?
t : t9 ; } // print to file printf("%d\t%d\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\n",N,threads_block,t1,t2,t3,t4,t5,t6,t7,t8,t9); fprintf(fp,"%d\t%d\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\n",N,threads_block,t1,t2,t3,t4,t5,t6,t7,t8,t9); } free(A); free(B); free(C); } fclose (fp); // onDev functions FILE *fp2 = fopen("../analysis/matMulTimesOnDev.txt", "w"); fprintf(fp2,"N\tT_B\tMM1\tMM2\tDSM\tDSM_COA\tcuBlas\tSM\tSM_tr\tCPU\tSM_trInd\n"); printf("\nonDev\n"); for(int i=0;i<=max_shift;i++){ t1=t2=t3=t4=t5=t6=t7=t8=t9=DBL_MAX; int N=1<<i; int dimsD[2]={N,N}; int dimsE[2]={N,N}; int D_nelem=dimsD[0]*dimsD[1]; int E_nelem=dimsE[0]*dimsE[1]; int F_nelem=dimsD[0]*dimsE[1]; double *D = (double *)malloc(D_nelem*sizeof(double)); double *E = (double *)malloc(E_nelem*sizeof(double)); double *F = (double *)malloc(F_nelem*sizeof(double)); double *dev_D = (double *)malloc(D_nelem*sizeof(double)); double *dev_E = (double *)malloc(E_nelem*sizeof(double)); double *dev_F = (double *)malloc(F_nelem*sizeof(double)); CHECK(hipMalloc((void**)&dev_D, D_nelem*sizeof(double))); CHECK(hipMalloc((void**)&dev_E, E_nelem*sizeof(double))); CHECK(hipMalloc((void**)&dev_F, F_nelem*sizeof(double))); for(int k=8;k<=32;k*=2){ threads_block=k*k; // best of 3 for (int j=0;j<3;j++){ create_random_matrix(D,D_nelem,0,10); create_random_matrix(E,E_nelem,0,10); copy_host_to_device_double(D,dev_D,D_nelem); copy_host_to_device_double(E,dev_E,E_nelem); start=seconds(); matMul_onDev1(dev_D, dev_E, dimsD[0],dimsE[1],dimsD[1],dev_F,threads_block); t=seconds()-start; t1=(t<t1) ? t : t1 ; start=seconds(); matMul_onDev2(dev_D, dev_E, dimsD[0],dimsE[1],dimsD[1],dev_F,threads_block); t=seconds()-start; t2=(t<t2) ? t : t2 ; start=seconds(); matMul_dsm_onDev(dev_D, dev_E, dimsD[0],dimsE[1],dimsD[1],dev_F,threads_block); t=seconds()-start; t3=(t<t3) ? t : t3 ; start=seconds(); matMul_sm_onDev(dev_D, dev_E, dimsD[0],dimsE[1],dimsD[1],dev_F); t=seconds()-start; t6=(t<t6) ? t : t6 ; start=seconds(); matMul_dsm_coa_onDev(dev_D, dev_E, dimsD[0],dimsE[1],dimsD[1],dev_F,threads_block); t=seconds()-start; t4=(t<t4) ? t : t4 ; hipblasStatus_t stat; hipblasHandle_t handle; stat = hipblasCreate(&handle); if (stat != HIPBLAS_STATUS_SUCCESS) { printf ("CUBLAS initialization failed\n"); } const double alpha=1.0; const double beta=0.0; // Invoke kernel start=seconds(); hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, dimsE[1], dimsD[0], dimsE[0],&alpha,(const double *)dev_E, dimsE[1],(const double *)dev_D, dimsD[1],&beta,(double *)dev_F, dimsE[1]); CHECK(hipDeviceSynchronize()); hipblasDestroy(handle); t=seconds()-start; t5=(t<t5) ? t : t5 ; start=seconds(); matMul_sm_onDev_tr(dev_D, dev_E, NORMAL,NORMAL,dimsD[0],dimsD[1],dimsE[0],dimsE[1],dev_F); t=seconds()-start; t7=(t<t7) ? t : t7 ; if (i<=9){ start=seconds(); matMul(D, E, dimsD[0],dimsE[1],dimsD[1],F); t=seconds()-start; t8=(t<t8) ? t : t8 ; } start=seconds(); matMul_sm_onDev_tr_ind(dev_D, dev_E, NORMAL,NORMAL,dimsD[0],dimsD[1],dimsE[0],dimsE[1],dev_F); t=seconds()-start; t9=(t<t9) ? t : t9 ; } // print to file printf("%d\t%d\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\n",N,threads_block,t1,t2,t3,t4,t5,t6,t7,t8,t9); fprintf(fp2,"%d\t%d\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\n",N,threads_block,t1,t2,t3,t4,t5,t6,t7,t8,t9); } free(D); free(E); free(F); CHECK(hipFree(dev_D)); CHECK(hipFree(dev_E)); CHECK(hipFree(dev_F)); } fclose (fp2); return EXIT_SUCCESS; }
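The benchmark above times every variant with a host wall clock (seconds()) around calls that synchronize internally. A hedged sketch of the usual device-side alternative, CUDA event timing, follows; matMul_like_kernel is a hypothetical stand-in for any of the kernels timed above, not part of the benchmark.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void matMul_like_kernel(double* c, int n)  // stand-in workload
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) c[i] = 2.0 * c[i];
}

int main()
{
    const int n = 1 << 20;
    double* d_c;
    cudaMalloc(&d_c, n * sizeof(double));
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);                  // enqueued in the same stream as the kernel
    matMul_like_kernel<<<(n + 255) / 256, 256>>>(d_c, n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);              // block until the stop event has occurred
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);  // GPU-side elapsed time in milliseconds
    printf("kernel time: %f ms\n", ms);
    cudaEventDestroy(start); cudaEventDestroy(stop);
    cudaFree(d_c);
    return 0;
}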
825aa1f0ce1bc073ada37ee66327917049e85859.cu
//#define MAIN_PROGRAM // c standard headers #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <assert.h> #include <cblas.h> #include <float.h> #include <sys/time.h> // own c headers #include "../common.h" #include "../matrix_operations/tensor.h" #include "../matrix_operations/matrix_operator.h" #include "../matrix_operations/matrix_operator_gpu.h" #include "../matrix_operations/test_matrix_operator.h" // cublas headers #include "cublas_v2.h" #include <cuda_runtime.h> #include <cblas.h> int main(int argc, char **argv) { // set up device int dev = 0; cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp, dev)); printf("\nTiming Matrix Multiplication at"); printf("device %d: %s \n\n", dev, deviceProp.name); CHECK(cudaSetDevice(dev)); printf("Performs the following timings:\n\n - matMul on HOST and matMul_gpu1, matMul_gpu2 matMul_gpu_dsm, matMul_gpu_dsm_coa, matMul_cublas,matMul_gpu_sm_tr,matMul_gpu_sm_tr_ind on Device with and without copying\n \n"); printf("\n_________________________________________________\n"); // GPU Functions srand(seconds()); // Initialization, should only be called once. double start,t,t1,t2,t3,t4,t5,t6,t7,t8,t9; FILE *fp = fopen("../analysis/matMulTimes.txt", "w"); fprintf(fp,"N\tT_B\tMM1\tMM2\tDSM\tDSM_COA\tcuBlas\tSM\tSM_tr\tCPU\tSM_trInd\n"); printf("N\tT_B\tMM1\tMM2\tDSM\tDSM_COA\tcuBlas\tSM\tSM_tr\tCPU\tSM_trInd\n"); int threads_block; //maximum shift for maximum dimension size int max_shift=11; // loop over dimension sizes for(int i=0;i<=max_shift;i++){ // set times on max for every size t1=t2=t3=t4=t5=t6=t7=t8=t9=DBL_MAX; // set up dimensions and arrays int N=1<<i; int dimsA[2]={N,N}; int dimsB[2]={N,N}; int A_nelem=dimsA[0]*dimsA[1]; int B_nelem=dimsB[0]*dimsB[1]; int C_nelem=dimsA[0]*dimsB[1]; double *A = (double *)malloc(A_nelem*sizeof(double)); double *B = (double *)malloc(B_nelem*sizeof(double)); double *C = (double *)malloc(C_nelem*sizeof(double)); // set sqrt(threads_block) for(int k=8;k<=32;k*=2){ threads_block=k*k; // best of 3 for (int j=0;j<3;j++){ create_random_matrix(A,A_nelem,0,10); create_random_matrix(B,B_nelem,0,10); start=seconds(); matMul_gpu1(A, B, dimsA[0],dimsB[1],dimsA[1],C,threads_block); t=seconds()-start; t1=(t<t1) ? t : t1 ; start=seconds(); matMul_gpu2(A, B, dimsA[0],dimsB[1],dimsA[1],C,threads_block); t=seconds()-start; t2=(t<t2) ? t : t2 ; start=seconds(); matMul_gpu_dsm(A, B, dimsA[0],dimsB[1],dimsA[1],C,threads_block); t=seconds()-start; t3=(t<t3) ? t : t3 ; start=seconds(); matMul_gpu_sm(A, B, dimsA[0],dimsB[1],dimsA[1],C); t=seconds()-start; t6=(t<t6) ? t : t6 ; start=seconds(); matMul_gpu_dsm_coa(A, B, dimsA[0],dimsB[1],dimsA[1],C,threads_block); t=seconds()-start; t4=(t<t4) ? t : t4 ; start=seconds(); matMul_cublas(A, B, dimsA[0],dimsB[1],dimsA[1],C,threads_block); t=seconds()-start; t5=(t<t5) ? t : t5 ; start=seconds(); matMul_gpu_sm_tr(A, B, NORMAL,NORMAL,dimsA[0],dimsA[1],dimsB[0],dimsB[1],C); t=seconds()-start; t7=(t<t7) ? t : t7 ; if (i<=9){ start=seconds(); matMul(A, B, dimsA[0],dimsB[1],dimsA[1],C); t=seconds()-start; t8=(t<t8) ? t : t8 ; } start=seconds(); matMul_gpu_sm_tr_ind(A, B, NORMAL,NORMAL,dimsA[0],dimsA[1],dimsB[0],dimsB[1],C); t=seconds()-start; t9=(t<t9) ? 
t : t9 ; } // print to file printf("%d\t%d\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\n",N,threads_block,t1,t2,t3,t4,t5,t6,t7,t8,t9); fprintf(fp,"%d\t%d\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\n",N,threads_block,t1,t2,t3,t4,t5,t6,t7,t8,t9); } free(A); free(B); free(C); } fclose (fp); // onDev functions FILE *fp2 = fopen("../analysis/matMulTimesOnDev.txt", "w"); fprintf(fp2,"N\tT_B\tMM1\tMM2\tDSM\tDSM_COA\tcuBlas\tSM\tSM_tr\tCPU\tSM_trInd\n"); printf("\nonDev\n"); for(int i=0;i<=max_shift;i++){ t1=t2=t3=t4=t5=t6=t7=t8=t9=DBL_MAX; int N=1<<i; int dimsD[2]={N,N}; int dimsE[2]={N,N}; int D_nelem=dimsD[0]*dimsD[1]; int E_nelem=dimsE[0]*dimsE[1]; int F_nelem=dimsD[0]*dimsE[1]; double *D = (double *)malloc(D_nelem*sizeof(double)); double *E = (double *)malloc(E_nelem*sizeof(double)); double *F = (double *)malloc(F_nelem*sizeof(double)); double *dev_D = (double *)malloc(D_nelem*sizeof(double)); double *dev_E = (double *)malloc(E_nelem*sizeof(double)); double *dev_F = (double *)malloc(F_nelem*sizeof(double)); CHECK(cudaMalloc((void**)&dev_D, D_nelem*sizeof(double))); CHECK(cudaMalloc((void**)&dev_E, E_nelem*sizeof(double))); CHECK(cudaMalloc((void**)&dev_F, F_nelem*sizeof(double))); for(int k=8;k<=32;k*=2){ threads_block=k*k; // best of 3 for (int j=0;j<3;j++){ create_random_matrix(D,D_nelem,0,10); create_random_matrix(E,E_nelem,0,10); copy_host_to_device_double(D,dev_D,D_nelem); copy_host_to_device_double(E,dev_E,E_nelem); start=seconds(); matMul_onDev1(dev_D, dev_E, dimsD[0],dimsE[1],dimsD[1],dev_F,threads_block); t=seconds()-start; t1=(t<t1) ? t : t1 ; start=seconds(); matMul_onDev2(dev_D, dev_E, dimsD[0],dimsE[1],dimsD[1],dev_F,threads_block); t=seconds()-start; t2=(t<t2) ? t : t2 ; start=seconds(); matMul_dsm_onDev(dev_D, dev_E, dimsD[0],dimsE[1],dimsD[1],dev_F,threads_block); t=seconds()-start; t3=(t<t3) ? t : t3 ; start=seconds(); matMul_sm_onDev(dev_D, dev_E, dimsD[0],dimsE[1],dimsD[1],dev_F); t=seconds()-start; t6=(t<t6) ? t : t6 ; start=seconds(); matMul_dsm_coa_onDev(dev_D, dev_E, dimsD[0],dimsE[1],dimsD[1],dev_F,threads_block); t=seconds()-start; t4=(t<t4) ? t : t4 ; cublasStatus_t stat; cublasHandle_t handle; stat = cublasCreate(&handle); if (stat != CUBLAS_STATUS_SUCCESS) { printf ("CUBLAS initialization failed\n"); } const double alpha=1.0; const double beta=0.0; // Invoke kernel start=seconds(); cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, dimsE[1], dimsD[0], dimsE[0],&alpha,(const double *)dev_E, dimsE[1],(const double *)dev_D, dimsD[1],&beta,(double *)dev_F, dimsE[1]); CHECK(cudaDeviceSynchronize()); cublasDestroy(handle); t=seconds()-start; t5=(t<t5) ? t : t5 ; start=seconds(); matMul_sm_onDev_tr(dev_D, dev_E, NORMAL,NORMAL,dimsD[0],dimsD[1],dimsE[0],dimsE[1],dev_F); t=seconds()-start; t7=(t<t7) ? t : t7 ; if (i<=9){ start=seconds(); matMul(D, E, dimsD[0],dimsE[1],dimsD[1],F); t=seconds()-start; t8=(t<t8) ? t : t8 ; } start=seconds(); matMul_sm_onDev_tr_ind(dev_D, dev_E, NORMAL,NORMAL,dimsD[0],dimsD[1],dimsE[0],dimsE[1],dev_F); t=seconds()-start; t9=(t<t9) ? t : t9 ; } // print to file printf("%d\t%d\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\n",N,threads_block,t1,t2,t3,t4,t5,t6,t7,t8,t9); fprintf(fp2,"%d\t%d\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\n",N,threads_block,t1,t2,t3,t4,t5,t6,t7,t8,t9); } free(D); free(E); free(F); CHECK(cudaFree(dev_D)); CHECK(cudaFree(dev_E)); CHECK(cudaFree(dev_F)); } fclose (fp2); return EXIT_SUCCESS; }
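The cublasDgemm call in the onDev loop passes dev_E before dev_D and swaps the m/n dimensions. That is the standard trick for multiplying row-major matrices with column-major cuBLAS: a row-major C is a column-major C^T, and C^T = B^T * A^T, so calling GEMM on (B, A) with swapped sizes writes the row-major product directly. A self-contained sketch of the same call pattern (gemm_row_major is an illustrative helper name):

#include <cstdio>
#include <cublas_v2.h>
#include <cuda_runtime.h>

// Row-major C(m x n) = A(m x k) * B(k x n) via column-major cuBLAS:
// evaluate C^T = B^T * A^T by passing B first, A second, and swapping m and n.
void gemm_row_major(cublasHandle_t h, int m, int n, int k,
                    const double* dA, const double* dB, double* dC)
{
    const double alpha = 1.0, beta = 0.0;
    cublasDgemm(h, CUBLAS_OP_N, CUBLAS_OP_N,
                n, m, k,
                &alpha,
                dB, n,   // B^T is n x k, leading dimension n
                dA, k,   // A^T is k x m, leading dimension k
                &beta,
                dC, n);  // C^T is n x m, leading dimension n
}

int main()
{
    // 2x2 check: A = [[1,2],[3,4]], B = I  =>  C = A in row-major order.
    double hA[4] = {1, 2, 3, 4}, hB[4] = {1, 0, 0, 1}, hC[4];
    double *dA, *dB, *dC;
    cudaMalloc(&dA, sizeof(hA)); cudaMalloc(&dB, sizeof(hB)); cudaMalloc(&dC, sizeof(hC));
    cudaMemcpy(dA, hA, sizeof(hA), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB, sizeof(hB), cudaMemcpyHostToDevice);
    cublasHandle_t h;
    cublasCreate(&h);
    gemm_row_major(h, 2, 2, 2, dA, dB, dC);
    cudaMemcpy(hC, dC, sizeof(hC), cudaMemcpyDeviceToHost);
    printf("%g %g %g %g\n", hC[0], hC[1], hC[2], hC[3]);  // expect 1 2 3 4
    cublasDestroy(h);
    cudaFree(dA); cudaFree(dB); cudaFree(dC);
    return 0;
}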
31edca18c1bdfd603faf31feeb973ee5268e543e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cumath.h" #define real float #define real3 float3 #define real4 float4 #define mkreal4 make_float4 #define SHAPEX 4 #define SHAPEY 4 #define SHAPEZ 4 #define L(x,y,z) ((x) + ( (y) + (z) * SHAPEY ) * SHAPEX) #define Gx 0 #define Gy -9 #define Gz 0 __device__ real norm( real4 p ) { return sqrt( p.x*p.x + p.y*p.y + p.z*p.z ); } __device__ real4 spring_force( real4 p0 , real4 p1 , real l0 ) { real4 dp = p1 - p0; real dl = norm( dp ); if( dl == 0 ) return mkreal4(0,0,0,0); real l = l0 -dl; return dp / dl * l; } extern "C" { __global__ void control( real4*pts , real4*nl , real ctlx , real ctly , real ctlz , real l ) { int x = threadIdx.x; int y = threadIdx.y; int z = threadIdx.z; int i = x + ( y + z * SHAPEY ) * SHAPEX; real4 p = mkreal4(0,0,0,0); p.x = ctlx + l * (z-1.5); p.y = ctly + l * (y-1.5); p.z = ctlz + l * (x-1.5); if( !(x%3) && !(y%3) && !(z%3) ) nl[i] += spring_force( pts[i] , p , 0 ); } __global__ void springs( real4*pts , real4*nl , float l0 , float l1 ) { int x = threadIdx.x; int y = threadIdx.y; int z = threadIdx.z; int i = x + ( y + z * SHAPEY ) * SHAPEX; if( x+1 < SHAPEX ) nl[i]+=spring_force(pts[i],pts[L(x+1,y,z)],l0); if( y+1 < SHAPEY ) nl[i]+=spring_force(pts[i],pts[L(x,y+1,z)],l0); if( z+1 < SHAPEZ ) nl[i]+=spring_force(pts[i],pts[L(x,y,z+1)],l0); if( x-1 >= 0 ) nl[i]+=spring_force(pts[i],pts[L(x-1,y,z)],l0); if( y-1 >= 0 ) nl[i]+=spring_force(pts[i],pts[L(x,y-1,z)],l0); if( z-1 >= 0 ) nl[i]+=spring_force(pts[i],pts[L(x,y,z-1)],l0); if( x+1 < SHAPEX && y+1<SHAPEY ) nl[i]+=spring_force(pts[i],pts[L(x+1,y+1,z)],l1); if( y+1 < SHAPEY && z+1<SHAPEZ ) nl[i]+=spring_force(pts[i],pts[L(x,y+1,z+1)],l1); if( z+1 < SHAPEZ && x+1<SHAPEX ) nl[i]+=spring_force(pts[i],pts[L(x+1,y,z+1)],l1); if( x-1 >= 0 && y+1 < SHAPEY ) nl[i]+=spring_force(pts[i],pts[L(x-1,y+1,z)],l1); if( y-1 >= 0 && z+1 < SHAPEZ ) nl[i]+=spring_force(pts[i],pts[L(x,y-1,z+1)],l1); if( z-1 >= 0 && x+1 < SHAPEX ) nl[i]+=spring_force(pts[i],pts[L(x+1,y,z-1)],l1); if( x+1 < SHAPEX && y-1 >= 0 ) nl[i]+=spring_force(pts[i],pts[L(x+1,y-1,z)],l1); if( y+1 < SHAPEY && z-1 >= 0 ) nl[i]+=spring_force(pts[i],pts[L(x,y+1,z-1)],l1); if( z+1 < SHAPEZ && x-1 >= 0 ) nl[i]+=spring_force(pts[i],pts[L(x-1,y,z+1)],l1); if( x-1 >= 0 && y-1 >= 0 ) nl[i]+=spring_force(pts[i],pts[L(x-1,y-1,z)],l1); if( y-1 >= 0 && z-1 >= 0 ) nl[i]+=spring_force(pts[i],pts[L(x,y-1,z-1)],l1); if( z-1 >= 0 && x-1 >= 0 ) nl[i]+=spring_force(pts[i],pts[L(x-1,y,z-1)],l1); } __global__ void update_forces( real4*frs , real4*nl , real4*pl , real k , real c , real dt ) { unsigned int x = threadIdx.x; unsigned int y = threadIdx.y; unsigned int z = threadIdx.z; unsigned int i = x + ( y + z * SHAPEY ) * SHAPEX; real4 lp = ( nl[i] - pl[i] ) / ( 2.0 * dt ); frs[i] += -k * lp - c * nl[i]; pl[i] = nl[i]; } __global__ void update( real4*pts , real4*prv , real4*frs , real*mas , real k , real c , real dt ) { unsigned int x = threadIdx.x; unsigned int y = threadIdx.y; unsigned int z = threadIdx.z; unsigned int i = x + ( y + z * SHAPEY ) * SHAPEX; real4 a = mkreal4(0,0,0,0); real4 n = mkreal4(0,0,0,1); real4 f = frs[i]; f.x += Gx; f.y += Gy; f.z += Gz; a = f / mas[i]; n = a * dt * dt + 2 * pts[i] - prv[i]; prv[i] = pts[i]; pts[i] = n; } __global__ void update2( real4*pts , real4*prv , real4*fr1 , real4*fr2 , real*mas , real k , real c , real dt ) { unsigned int x = threadIdx.x; unsigned int y = threadIdx.y; unsigned int z = threadIdx.z; unsigned int i = x + ( y + z * SHAPEY ) * SHAPEX; 
real4 a = mkreal4(0,0,0,0); real4 n = mkreal4(0,0,0,1); real4 f = fr1[i] + fr2[i]; f.x += Gx; f.y += Gy; f.z += Gz; a = f / mas[i]; n = a * dt * dt + 2 * pts[i] - prv[i]; prv[i] = pts[i]; pts[i] = n; pts[i].w = 1.0; } __global__ void collisions( real4*pts , real4*prv , real*brd , real u ) { unsigned int x = threadIdx.x; unsigned int y = threadIdx.y; unsigned int z = threadIdx.z; unsigned int i = x + ( y + z * SHAPEY ) * SHAPEX; real n; for( unsigned int j=0 ; j<3 ; ++j ) { if( pts[i].x <= brd[0] || pts[i].x >= brd[1] ) { n = prv[i].x+u*pts[i].x-u*prv[i].x; pts[i].x = prv[i].x; prv[i].x = n; } if( pts[i].y <= brd[2] || pts[i].y >= brd[3] ) { n = prv[i].y+u*pts[i].y-u*prv[i].y; pts[i].y = prv[i].y; prv[i].y = n; } if( pts[i].z <= brd[4] || pts[i].z >= brd[5] ) { n = prv[i].z+u*pts[i].z-u*prv[i].z; pts[i].z = prv[i].z; prv[i].z = n; } } } }
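The spring_force helper above returns a displacement along the spring axis scaled by (l0 - dl), i.e. proportional to how far the spring is from its rest length; the stiffness and damping constants are applied later in update_forces. For reference, a standalone host-side sketch of the conventional Hooke force, with stiffness k folded in directly. The sign convention here (stretched springs pull p0 toward p1) is the textbook one and differs from the raw (l0 - dl) term the kernel accumulates.

#include <cmath>
#include <cstdio>

struct Vec3 { float x, y, z; };

// Hooke force on endpoint p0 of a spring to p1 with rest length l0:
// F = k * (|p1 - p0| - l0) * unit(p1 - p0).
Vec3 hooke_force(Vec3 p0, Vec3 p1, float l0, float k)
{
    Vec3 d = { p1.x - p0.x, p1.y - p0.y, p1.z - p0.z };
    float len = std::sqrt(d.x * d.x + d.y * d.y + d.z * d.z);
    if (len == 0.0f) return Vec3{0, 0, 0};   // coincident endpoints: no direction
    float mag = k * (len - l0) / len;        // scalar factor applied to d
    return Vec3{ mag * d.x, mag * d.y, mag * d.z };
}

int main()
{
    // Spring of rest length 1 stretched to length 2 along x, k = 10.
    Vec3 f = hooke_force(Vec3{0, 0, 0}, Vec3{2, 0, 0}, 1.0f, 10.0f);
    printf("F = (%g, %g, %g)\n", f.x, f.y, f.z);  // expect (10, 0, 0): a pull toward p1
    return 0;
}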
31edca18c1bdfd603faf31feeb973ee5268e543e.cu
#include "cumath.h" #define real float #define real3 float3 #define real4 float4 #define mkreal4 make_float4 #define SHAPEX 4 #define SHAPEY 4 #define SHAPEZ 4 #define L(x,y,z) ((x) + ( (y) + (z) * SHAPEY ) * SHAPEX) #define Gx 0 #define Gy -9 #define Gz 0 __device__ real norm( real4 p ) { return sqrt( p.x*p.x + p.y*p.y + p.z*p.z ); } __device__ real4 spring_force( real4 p0 , real4 p1 , real l0 ) { real4 dp = p1 - p0; real dl = norm( dp ); if( dl == 0 ) return mkreal4(0,0,0,0); real l = l0 -dl; return dp / dl * l; } extern "C" { __global__ void control( real4*pts , real4*nl , real ctlx , real ctly , real ctlz , real l ) { int x = threadIdx.x; int y = threadIdx.y; int z = threadIdx.z; int i = x + ( y + z * SHAPEY ) * SHAPEX; real4 p = mkreal4(0,0,0,0); p.x = ctlx + l * (z-1.5); p.y = ctly + l * (y-1.5); p.z = ctlz + l * (x-1.5); if( !(x%3) && !(y%3) && !(z%3) ) nl[i] += spring_force( pts[i] , p , 0 ); } __global__ void springs( real4*pts , real4*nl , float l0 , float l1 ) { int x = threadIdx.x; int y = threadIdx.y; int z = threadIdx.z; int i = x + ( y + z * SHAPEY ) * SHAPEX; if( x+1 < SHAPEX ) nl[i]+=spring_force(pts[i],pts[L(x+1,y,z)],l0); if( y+1 < SHAPEY ) nl[i]+=spring_force(pts[i],pts[L(x,y+1,z)],l0); if( z+1 < SHAPEZ ) nl[i]+=spring_force(pts[i],pts[L(x,y,z+1)],l0); if( x-1 >= 0 ) nl[i]+=spring_force(pts[i],pts[L(x-1,y,z)],l0); if( y-1 >= 0 ) nl[i]+=spring_force(pts[i],pts[L(x,y-1,z)],l0); if( z-1 >= 0 ) nl[i]+=spring_force(pts[i],pts[L(x,y,z-1)],l0); if( x+1 < SHAPEX && y+1<SHAPEY ) nl[i]+=spring_force(pts[i],pts[L(x+1,y+1,z)],l1); if( y+1 < SHAPEY && z+1<SHAPEZ ) nl[i]+=spring_force(pts[i],pts[L(x,y+1,z+1)],l1); if( z+1 < SHAPEZ && x+1<SHAPEX ) nl[i]+=spring_force(pts[i],pts[L(x+1,y,z+1)],l1); if( x-1 >= 0 && y+1 < SHAPEY ) nl[i]+=spring_force(pts[i],pts[L(x-1,y+1,z)],l1); if( y-1 >= 0 && z+1 < SHAPEZ ) nl[i]+=spring_force(pts[i],pts[L(x,y-1,z+1)],l1); if( z-1 >= 0 && x+1 < SHAPEX ) nl[i]+=spring_force(pts[i],pts[L(x+1,y,z-1)],l1); if( x+1 < SHAPEX && y-1 >= 0 ) nl[i]+=spring_force(pts[i],pts[L(x+1,y-1,z)],l1); if( y+1 < SHAPEY && z-1 >= 0 ) nl[i]+=spring_force(pts[i],pts[L(x,y+1,z-1)],l1); if( z+1 < SHAPEZ && x-1 >= 0 ) nl[i]+=spring_force(pts[i],pts[L(x-1,y,z+1)],l1); if( x-1 >= 0 && y-1 >= 0 ) nl[i]+=spring_force(pts[i],pts[L(x-1,y-1,z)],l1); if( y-1 >= 0 && z-1 >= 0 ) nl[i]+=spring_force(pts[i],pts[L(x,y-1,z-1)],l1); if( z-1 >= 0 && x-1 >= 0 ) nl[i]+=spring_force(pts[i],pts[L(x-1,y,z-1)],l1); } __global__ void update_forces( real4*frs , real4*nl , real4*pl , real k , real c , real dt ) { unsigned int x = threadIdx.x; unsigned int y = threadIdx.y; unsigned int z = threadIdx.z; unsigned int i = x + ( y + z * SHAPEY ) * SHAPEX; real4 lp = ( nl[i] - pl[i] ) / ( 2.0 * dt ); frs[i] += -k * lp - c * nl[i]; pl[i] = nl[i]; } __global__ void update( real4*pts , real4*prv , real4*frs , real*mas , real k , real c , real dt ) { unsigned int x = threadIdx.x; unsigned int y = threadIdx.y; unsigned int z = threadIdx.z; unsigned int i = x + ( y + z * SHAPEY ) * SHAPEX; real4 a = mkreal4(0,0,0,0); real4 n = mkreal4(0,0,0,1); real4 f = frs[i]; f.x += Gx; f.y += Gy; f.z += Gz; a = f / mas[i]; n = a * dt * dt + 2 * pts[i] - prv[i]; prv[i] = pts[i]; pts[i] = n; } __global__ void update2( real4*pts , real4*prv , real4*fr1 , real4*fr2 , real*mas , real k , real c , real dt ) { unsigned int x = threadIdx.x; unsigned int y = threadIdx.y; unsigned int z = threadIdx.z; unsigned int i = x + ( y + z * SHAPEY ) * SHAPEX; real4 a = mkreal4(0,0,0,0); real4 n = mkreal4(0,0,0,1); real4 f = fr1[i] + fr2[i]; f.x 
+= Gx; f.y += Gy; f.z += Gz; a = f / mas[i]; n = a * dt * dt + 2 * pts[i] - prv[i]; prv[i] = pts[i]; pts[i] = n; pts[i].w = 1.0; } __global__ void collisions( real4*pts , real4*prv , real*brd , real u ) { unsigned int x = threadIdx.x; unsigned int y = threadIdx.y; unsigned int z = threadIdx.z; unsigned int i = x + ( y + z * SHAPEY ) * SHAPEX; real n; for( unsigned int j=0 ; j<3 ; ++j ) { if( pts[i].x <= brd[0] || pts[i].x >= brd[1] ) { n = prv[i].x+u*pts[i].x-u*prv[i].x; pts[i].x = prv[i].x; prv[i].x = n; } if( pts[i].y <= brd[2] || pts[i].y >= brd[3] ) { n = prv[i].y+u*pts[i].y-u*prv[i].y; pts[i].y = prv[i].y; prv[i].y = n; } if( pts[i].z <= brd[4] || pts[i].z >= brd[5] ) { n = prv[i].z+u*pts[i].z-u*prv[i].z; pts[i].z = prv[i].z; prv[i].z = n; } } } }
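The update/update2 kernels in both copies advance positions with the position (Stoermer) Verlet rule x_{n+1} = 2*x_n - x_{n-1} + a*dt^2, keeping the previous position in prv instead of an explicit velocity. A minimal 1-D host sketch of the same integrator (the step size and step count are illustrative):

#include <cstdio>

// Position Verlet in 1-D: velocity is implicit in the pair (x, x_prev),
// exactly as in the pts/prv arrays above.
int main()
{
    const float a = -9.0f;     // constant acceleration (matches Gy above)
    const float dt = 0.01f;
    float x = 0.0f;
    float x_prev = 0.0f;       // x_prev == x encodes zero initial velocity
    for (int step = 0; step < 100; ++step) {
        float x_next = 2.0f * x - x_prev + a * dt * dt;
        x_prev = x;
        x = x_next;
    }
    // Analytic free fall after t = 1 s: 0.5*a*t^2 = -4.5; this crude v0
    // initialization gives -4.545, off by O(dt).
    printf("x(1s) = %f\n", x);
    return 0;
}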
95ecedba45e395cff386123f1e5310b718397a77.hip
// !!! This is a file automatically generated by hipify!!! // CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania // Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania // This file includes code from: // Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097 // Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/ // Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com #include <stdio.h> #include <hip/hip_runtime.h> #include <cmath> #include "sceneStructs.h" #include "glm/glm.hpp" #include "utilities.h" #include "raytraceKernel.h" #include "intersections.h" #include "interactions.h" #include <vector> #include <time.h> #if TORCH_HIP_VERSION >= 5000 #include <helper_math.h> #else #include <cutil_math.h> #endif void checkCUDAError(const char *msg) { hipError_t err = hipGetLastError(); if( hipSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) ); exit(EXIT_FAILURE); } } //LOOK: This function demonstrates how to use thrust for random number generation on the GPU! //Function that generates static. __host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){ int index = x + (y * resolution.x); thrust::default_random_engine rng(hash(index*time)); thrust::uniform_real_distribution<float> u01(0,1); return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng)); } // HW TODO: IMPLEMENT THIS FUNCTION // Does initial raycast from camera. // (0, 0) is at top right corner of screen. __host__ __device__ ray raycastFromCameraKernel(glm::vec2 resolution, float time, int x, int y, glm::vec3 eye, glm::vec3 view, glm::vec3 up, glm::vec2 fov) { float resWidth = (float) resolution.x; float resHeight = (float) resolution.y; glm::vec3 c(view); // View direction (unit vector) from eye glm::vec3 e(eye); // Camera center position glm::vec3 m = e + c; // Midpoint of screen glm::vec3 u(up); // Up vector glm::vec3 a = glm::cross(c, u); // c x u TODO: make sure this is well defined glm::vec3 b = glm::cross(a, c); // a x c TODO: make sure this is well defined glm::vec3 v; // Vertical vector from "m" to top of screen glm::vec3 h; // Horizontal vector from "m" to right of screen // Calculate v & h { float phi = fov.y * PI / 180.0f / 2.0f; float screenRatio = resHeight / resWidth; v = b * tan(phi) / (float)glm::length(b); float theta = atan(glm::length(v)/screenRatio / (float)glm::length(c)); h = a * (float)glm::length(c) * tan(theta) / (float)glm::length(a); } // Obtain a unit vector in the direction from the eye to a pixel point (x, y) on screen float sx = ((float) x) / ((float) (resWidth - 1)); float sy = ((float) y) / ((float) (resHeight - 1)); glm::vec3 p = m - (2*sx - 1)*h - (2*sy - 1)*v; // World position of point (x, y) on screen glm::vec3 rayUnitVec = glm::normalize(p-e); ray r; r.origin = eye; r.direction = rayUnitVec; //r.origin = glm::vec3(0,0,0); //r.direction = glm::vec3(0,0,-1); return r; } //Kernel that blacks out a given image buffer __global__ void clearImage(glm::vec2 resolution, glm::vec3* image){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ image[index] = glm::vec3(0,0,0); } } //Kernel that writes the image 
to the OpenGL PBO directly. __global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ glm::vec3 color; color.x = image[index].x*255.0; color.y = image[index].y*255.0; color.z = image[index].z*255.0; if(color.x>255){ color.x = 255; } if(color.y>255){ color.y = 255; } if(color.z>255){ color.z = 255; } // Each thread writes one pixel location in the texture (textel) PBOpos[index].w = 0; PBOpos[index].x = color.x; PBOpos[index].y = color.y; PBOpos[index].z = color.z; } } __device__ glm::vec3 reflect(glm::vec3 const & I, glm::vec3 const & N) { return I - 2.0f * glm::dot(N, I) * N; } __device__ bool isRayUnblocked(glm::vec3 const & point1, glm::vec3 const & point2, staticGeom* geoms, int numberOfGeoms) { glm::vec3 DIRECTION(point2 - point1); float DISTANCE = glm::length(DIRECTION); // Offset start position in ray direction by small distance to prevent self collisions float DELTA = 0.001f; ray r; r.origin = point1 + DELTA * DIRECTION; r.direction = glm::normalize(DIRECTION); for (int i=0; i<numberOfGeoms; ++i) { float intersectionDistance; glm::vec3 intersectionPoint; glm::vec3 normal; switch (geoms[i].type) { case SPHERE: intersectionDistance = sphereIntersectionTest(geoms[i], r, intersectionPoint, normal); break; case CUBE: intersectionDistance = boxIntersectionTest(geoms[i], r, intersectionPoint, normal); break; case MESH: intersectionDistance = -1.0f; break; } // Does not intersect so check next primitive if (intersectionDistance <= 0.0f) continue; // Take into consideration intersection only between the two points. if (intersectionDistance < DISTANCE) return false; } return true; } // HW TODO: IMPLEMENT THIS FUNCTION // Core raytracer kernel (Assumes geometry material index is valid) __global__ void raytraceRay(glm::vec2 resolution, float time, cameraData cam, int rayDepth, glm::vec3* colors, staticGeom* geoms, int numberOfGeoms, material* materials, int numberOfMaterials) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if ( x >= resolution.x || y >= resolution.y ) return; ray r; r = raycastFromCameraKernel(resolution, time, x, y, cam.position, cam.view, cam.up, cam.fov); // ============================================ // Determine closest intersection with geometry // ============================================ float distance = -1.0f; glm::vec3 intersection; glm::vec3 normal; int materialIdx; for (int i = 0; i < numberOfGeoms; ++i) { float newDistance; glm::vec3 newIntersection; glm::vec3 newNormal; switch (geoms[i].type) { case SPHERE: newDistance = sphereIntersectionTest(geoms[i], r, newIntersection, newNormal); break; case CUBE: newDistance = boxIntersectionTest(geoms[i], r, newIntersection, newNormal); break; case MESH: newDistance = -1.0f; break; } if ( newDistance < 0.0f ) continue; if ( distance < 0.0f || (distance > 0.0f && newDistance < distance) ) { distance = newDistance; intersection = newIntersection; normal = newNormal; materialIdx = geoms[i].materialid; } } // ============================================ // Paint pixel // ============================================ // No hit if ( distance < 0.0f ) { colors[index] = glm::vec3(0.0f, 0.0f, 0.0f); //colors[index] = generateRandomNumberFromThread(resolution, time, x, y); return; } // Simple local reflectance model (local illumination model 
formula) float reflectivity = 0.0f; float transmittance = 1.0f - reflectivity; glm::vec3 materialColor = materials[materialIdx].color; glm::vec3 reflectedColor(0.0f, 0.0f, 0.0f); glm::vec3 ambientLightColor(1.0f, 1.0f, 1.0f); float AMBIENT_WEIGHT = 0.2f; // Ka - Ambient reflectivity factor float DIFFUSE_WEIGHT = 0.3f; // Kd - Diffuse reflectivity factor float SPECULAR_WEIGHT = 0.5f; // Ks - Specular reflectivity factor glm::vec3 lightColor(1.0f, 1.0f, 1.0f); glm::vec3 color = AMBIENT_WEIGHT * ambientLightColor * materialColor; thrust::default_random_engine rng(hash(index*time)); thrust::uniform_real_distribution<float> u01(-0.15f, 0.15f); for ( int i = 0; i < 1; ++i) { glm::vec3 lightPosition(0.5f + (float) u01(rng), 0.75f, -0.5f + (float) u01(rng)); // Unit vector from intersection point to light source glm::vec3 LIGHT_DIRECTION = glm::normalize(lightPosition - intersection); // Direction of reflected light at intersection point glm::vec3 LIGHT_REFLECTION = glm::normalize(reflect(-1.0f*LIGHT_DIRECTION, normal)); // Determine diffuse term float diffuseTerm; diffuseTerm = glm::dot(normal, LIGHT_DIRECTION); diffuseTerm = glm::clamp(diffuseTerm, 0.0f, 1.0f); // Determine specular term float specularTerm = 0.0f; if ( materials[materialIdx].specularExponent - 0.0f > 0.001f ) { float SPECULAR_EXPONENT = materials[materialIdx].specularExponent; glm::vec3 EYE_DIRECTION = glm::normalize(cam.position - intersection); specularTerm = glm::dot(LIGHT_REFLECTION, EYE_DIRECTION); specularTerm = pow(fmaxf(specularTerm, 0.0f), SPECULAR_EXPONENT); specularTerm = glm::clamp(specularTerm, 0.0f, 1.0f); } if (isRayUnblocked(intersection, lightPosition, geoms, numberOfGeoms)) { color += DIFFUSE_WEIGHT * lightColor * materialColor * diffuseTerm / 1.0f; color += SPECULAR_WEIGHT * lightColor * specularTerm / 1.0f; } } colors[index] = reflectivity*reflectedColor + transmittance*color; } // HW TODO: FINISH THIS FUNCTION // Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management void cudaRaytraceCore(uchar4* PBOpos, camera* renderCam, int frame, int iterations, material* materials, int numberOfMaterials, geom* geoms, int numberOfGeoms){ clock_t time1, time2; time1 = clock(); int traceDepth = 1; //determines how many bounces the raytracer traces // set up crucial magic int tileSize = 16; dim3 threadsPerBlock(tileSize, tileSize); dim3 fullBlocksPerGrid((int)ceil(float(renderCam->resolution.x)/float(tileSize)), (int)ceil(float(renderCam->resolution.y)/float(tileSize))); //send image to GPU glm::vec3* cudaimage = NULL; hipMalloc((void**)&cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3)); hipMemcpy( cudaimage, renderCam->image, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), hipMemcpyHostToDevice); //package geometry and materials and sent to GPU staticGeom* geomList = new staticGeom[numberOfGeoms]; for(int i=0; i<numberOfGeoms; i++){ staticGeom newStaticGeom; newStaticGeom.type = geoms[i].type; newStaticGeom.materialid = geoms[i].materialid; newStaticGeom.translation = geoms[i].translations[frame]; newStaticGeom.rotation = geoms[i].rotations[frame]; newStaticGeom.scale = geoms[i].scales[frame]; newStaticGeom.transform = geoms[i].transforms[frame]; newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame]; geomList[i] = newStaticGeom; } staticGeom* cudageoms = NULL; hipMalloc((void**)&cudageoms, numberOfGeoms*sizeof(staticGeom)); hipMemcpy( cudageoms, geomList, numberOfGeoms*sizeof(staticGeom), 
hipMemcpyHostToDevice); material* cudamaterials = NULL; hipMalloc((void**)&cudamaterials, numberOfMaterials*sizeof(material)); hipMemcpy( cudamaterials, materials, numberOfMaterials*sizeof(material), hipMemcpyHostToDevice); //package camera cameraData cam; cam.resolution = renderCam->resolution; cam.position = renderCam->positions[frame]; cam.view = renderCam->views[frame]; cam.up = renderCam->ups[frame]; cam.fov = renderCam->fov; //kernel launches hipLaunchKernelGGL(( raytraceRay), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, renderCam->resolution, (float)iterations, cam, traceDepth, cudaimage, cudageoms, numberOfGeoms, cudamaterials, numberOfMaterials); hipLaunchKernelGGL(( sendImageToPBO), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, PBOpos, renderCam->resolution, cudaimage); //retrieve image from GPU hipMemcpy( renderCam->image, cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), hipMemcpyDeviceToHost); //free up stuff, or else we'll leak memory like a madman hipFree( cudaimage ); hipFree( cudageoms ); hipFree( cudamaterials ); delete[] geomList; // geomList was allocated with new[], so it must be released with delete[] // make certain the kernel has completed hipDeviceSynchronize(); checkCUDAError("Kernel failed!"); time2 = clock(); float execution_time = ((float) (time2 - time1)) / CLOCKS_PER_SEC; printf ("Execution time: %f\n", execution_time); }
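cudaRaytraceCore above sizes its grid with ceil(resolution / tileSize) in each dimension, so the edge tiles spawn threads past the image and the kernels must bounds-check with a strict < comparison before indexing. A minimal sketch of that standard 2-D launch pattern (kernel name and image size are illustrative):

#include <cstdio>
#include <cuda_runtime.h>

__global__ void shade(float* img, int width, int height)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height) return;  // strict bound: ceil() overshoots edge tiles
    img[y * width + x] = 1.0f;
}

int main()
{
    const int width = 800, height = 600, tile = 16;
    float* d_img;
    cudaMalloc(&d_img, width * height * sizeof(float));
    dim3 block(tile, tile);
    dim3 grid((width + tile - 1) / tile,    // integer ceil division, same effect as the
              (height + tile - 1) / tile);  // (int)ceil(float(...) / float(tileSize)) above
    shade<<<grid, block>>>(d_img, width, height);
    cudaDeviceSynchronize();
    printf("launched %d x %d blocks of %d x %d threads\n", grid.x, grid.y, tile, tile);
    cudaFree(d_img);
    return 0;
}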
95ecedba45e395cff386123f1e5310b718397a77.cu
// CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania // Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania // This file includes code from: // Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097 // Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/ // Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com #include <stdio.h> #include <cuda.h> #include <cmath> #include "sceneStructs.h" #include "glm/glm.hpp" #include "utilities.h" #include "raytraceKernel.h" #include "intersections.h" #include "interactions.h" #include <vector> #include <time.h> #if CUDA_VERSION >= 5000 #include <helper_math.h> #else #include <cutil_math.h> #endif void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) ); exit(EXIT_FAILURE); } } //LOOK: This function demonstrates how to use thrust for random number generation on the GPU! //Function that generates static. __host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){ int index = x + (y * resolution.x); thrust::default_random_engine rng(hash(index*time)); thrust::uniform_real_distribution<float> u01(0,1); return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng)); } // HW TODO: IMPLEMENT THIS FUNCTION // Does initial raycast from camera. // (0, 0) is at top right corner of screen. __host__ __device__ ray raycastFromCameraKernel(glm::vec2 resolution, float time, int x, int y, glm::vec3 eye, glm::vec3 view, glm::vec3 up, glm::vec2 fov) { float resWidth = (float) resolution.x; float resHeight = (float) resolution.y; glm::vec3 c(view); // View direction (unit vector) from eye glm::vec3 e(eye); // Camera center position glm::vec3 m = e + c; // Midpoint of screen glm::vec3 u(up); // Up vector glm::vec3 a = glm::cross(c, u); // c x u TODO: make sure this is well defined glm::vec3 b = glm::cross(a, c); // a x c TODO: make sure this is well defined glm::vec3 v; // Vertical vector from "m" to top of screen glm::vec3 h; // Horizontal vector from "m" to right of screen // Calculate v & h { float phi = fov.y * PI / 180.0f / 2.0f; float screenRatio = resHeight / resWidth; v = b * tan(phi) / (float)glm::length(b); float theta = atan(glm::length(v)/screenRatio / (float)glm::length(c)); h = a * (float)glm::length(c) * tan(theta) / (float)glm::length(a); } // Obtain a unit vector in the direction from the eye to a pixel point (x, y) on screen float sx = ((float) x) / ((float) (resWidth - 1)); float sy = ((float) y) / ((float) (resHeight - 1)); glm::vec3 p = m - (2*sx - 1)*h - (2*sy - 1)*v; // World position of point (x, y) on screen glm::vec3 rayUnitVec = glm::normalize(p-e); ray r; r.origin = eye; r.direction = rayUnitVec; //r.origin = glm::vec3(0,0,0); //r.direction = glm::vec3(0,0,-1); return r; } //Kernel that blacks out a given image buffer __global__ void clearImage(glm::vec2 resolution, glm::vec3* image){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ image[index] = glm::vec3(0,0,0); } } //Kernel that writes the image to the OpenGL PBO directly. 
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ glm::vec3 color; color.x = image[index].x*255.0; color.y = image[index].y*255.0; color.z = image[index].z*255.0; if(color.x>255){ color.x = 255; } if(color.y>255){ color.y = 255; } if(color.z>255){ color.z = 255; } // Each thread writes one pixel location in the texture (textel) PBOpos[index].w = 0; PBOpos[index].x = color.x; PBOpos[index].y = color.y; PBOpos[index].z = color.z; } } __device__ glm::vec3 reflect(glm::vec3 const & I, glm::vec3 const & N) { return I - 2.0f * glm::dot(N, I) * N; } __device__ bool isRayUnblocked(glm::vec3 const & point1, glm::vec3 const & point2, staticGeom* geoms, int numberOfGeoms) { glm::vec3 DIRECTION(point2 - point1); float DISTANCE = glm::length(DIRECTION); // Offset start position in ray direction by small distance to prevent self collisions float DELTA = 0.001f; ray r; r.origin = point1 + DELTA * DIRECTION; r.direction = glm::normalize(DIRECTION); for (int i=0; i<numberOfGeoms; ++i) { float intersectionDistance; glm::vec3 intersectionPoint; glm::vec3 normal; switch (geoms[i].type) { case SPHERE: intersectionDistance = sphereIntersectionTest(geoms[i], r, intersectionPoint, normal); break; case CUBE: intersectionDistance = boxIntersectionTest(geoms[i], r, intersectionPoint, normal); break; case MESH: intersectionDistance = -1.0f; break; } // Does not intersect so check next primitive if (intersectionDistance <= 0.0f) continue; // Take into consideration intersection only between the two points. if (intersectionDistance < DISTANCE) return false; } return true; } // HW TODO: IMPLEMENT THIS FUNCTION // Core raytracer kernel (Assumes geometry material index is valid) __global__ void raytraceRay(glm::vec2 resolution, float time, cameraData cam, int rayDepth, glm::vec3* colors, staticGeom* geoms, int numberOfGeoms, material* materials, int numberOfMaterials) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if ( x >= resolution.x || y >= resolution.y ) return; ray r; r = raycastFromCameraKernel(resolution, time, x, y, cam.position, cam.view, cam.up, cam.fov); // ============================================ // Determine closest intersection with geometry // ============================================ float distance = -1.0f; glm::vec3 intersection; glm::vec3 normal; int materialIdx; for (int i = 0; i < numberOfGeoms; ++i) { float newDistance; glm::vec3 newIntersection; glm::vec3 newNormal; switch (geoms[i].type) { case SPHERE: newDistance = sphereIntersectionTest(geoms[i], r, newIntersection, newNormal); break; case CUBE: newDistance = boxIntersectionTest(geoms[i], r, newIntersection, newNormal); break; case MESH: newDistance = -1.0f; break; } if ( newDistance < 0.0f ) continue; if ( distance < 0.0f || (distance > 0.0f && newDistance < distance) ) { distance = newDistance; intersection = newIntersection; normal = newNormal; materialIdx = geoms[i].materialid; } } // ============================================ // Paint pixel // ============================================ // No hit if ( distance < 0.0f ) { colors[index] = glm::vec3(0.0f, 0.0f, 0.0f); //colors[index] = generateRandomNumberFromThread(resolution, time, x, y); return; } // Simple local reflectance model (local illumination model formula) float reflectivity = 
0.0f; float transmittance = 1.0f - reflectivity; glm::vec3 materialColor = materials[materialIdx].color; glm::vec3 reflectedColor(0.0f, 0.0f, 0.0f); glm::vec3 ambientLightColor(1.0f, 1.0f, 1.0f); float AMBIENT_WEIGHT = 0.2f; // Ka - Ambient reflectivity factor float DIFFUSE_WEIGHT = 0.3f; // Kd - Diffuse reflectivity factor float SPECULAR_WEIGHT = 0.5f; // Ks - Specular reflectivity factor glm::vec3 lightColor(1.0f, 1.0f, 1.0f); glm::vec3 color = AMBIENT_WEIGHT * ambientLightColor * materialColor; thrust::default_random_engine rng(hash(index*time)); thrust::uniform_real_distribution<float> u01(-0.15f, 0.15f); for ( int i = 0; i < 1; ++i) { glm::vec3 lightPosition(0.5f + (float) u01(rng), 0.75f, -0.5f + (float) u01(rng)); // Unit vector from intersection point to light source glm::vec3 LIGHT_DIRECTION = glm::normalize(lightPosition - intersection); // Direction of reflected light at intersection point glm::vec3 LIGHT_REFLECTION = glm::normalize(reflect(-1.0f*LIGHT_DIRECTION, normal)); // Determine diffuse term float diffuseTerm; diffuseTerm = glm::dot(normal, LIGHT_DIRECTION); diffuseTerm = glm::clamp(diffuseTerm, 0.0f, 1.0f); // Determine specular term float specularTerm = 0.0f; if ( materials[materialIdx].specularExponent - 0.0f > 0.001f ) { float SPECULAR_EXPONENT = materials[materialIdx].specularExponent; glm::vec3 EYE_DIRECTION = glm::normalize(cam.position - intersection); specularTerm = glm::dot(LIGHT_REFLECTION, EYE_DIRECTION); specularTerm = pow(fmaxf(specularTerm, 0.0f), SPECULAR_EXPONENT); specularTerm = glm::clamp(specularTerm, 0.0f, 1.0f); } if (isRayUnblocked(intersection, lightPosition, geoms, numberOfGeoms)) { color += DIFFUSE_WEIGHT * lightColor * materialColor * diffuseTerm / 1.0f; color += SPECULAR_WEIGHT * lightColor * specularTerm / 1.0f; } } colors[index] = reflectivity*reflectedColor + transmittance*color; } // HW TODO: FINISH THIS FUNCTION // Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management void cudaRaytraceCore(uchar4* PBOpos, camera* renderCam, int frame, int iterations, material* materials, int numberOfMaterials, geom* geoms, int numberOfGeoms){ clock_t time1, time2; time1 = clock(); int traceDepth = 1; //determines how many bounces the raytracer traces // set up crucial magic int tileSize = 16; dim3 threadsPerBlock(tileSize, tileSize); dim3 fullBlocksPerGrid((int)ceil(float(renderCam->resolution.x)/float(tileSize)), (int)ceil(float(renderCam->resolution.y)/float(tileSize))); //send image to GPU glm::vec3* cudaimage = NULL; cudaMalloc((void**)&cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3)); cudaMemcpy( cudaimage, renderCam->image, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), cudaMemcpyHostToDevice); //package geometry and materials and sent to GPU staticGeom* geomList = new staticGeom[numberOfGeoms]; for(int i=0; i<numberOfGeoms; i++){ staticGeom newStaticGeom; newStaticGeom.type = geoms[i].type; newStaticGeom.materialid = geoms[i].materialid; newStaticGeom.translation = geoms[i].translations[frame]; newStaticGeom.rotation = geoms[i].rotations[frame]; newStaticGeom.scale = geoms[i].scales[frame]; newStaticGeom.transform = geoms[i].transforms[frame]; newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame]; geomList[i] = newStaticGeom; } staticGeom* cudageoms = NULL; cudaMalloc((void**)&cudageoms, numberOfGeoms*sizeof(staticGeom)); cudaMemcpy( cudageoms, geomList, numberOfGeoms*sizeof(staticGeom), cudaMemcpyHostToDevice); material* 
cudamaterials = NULL; cudaMalloc((void**)&cudamaterials, numberOfMaterials*sizeof(material)); cudaMemcpy( cudamaterials, materials, numberOfMaterials*sizeof(material), cudaMemcpyHostToDevice); //package camera cameraData cam; cam.resolution = renderCam->resolution; cam.position = renderCam->positions[frame]; cam.view = renderCam->views[frame]; cam.up = renderCam->ups[frame]; cam.fov = renderCam->fov; //kernel launches raytraceRay<<<fullBlocksPerGrid, threadsPerBlock>>>(renderCam->resolution, (float)iterations, cam, traceDepth, cudaimage, cudageoms, numberOfGeoms, cudamaterials, numberOfMaterials); sendImageToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(PBOpos, renderCam->resolution, cudaimage); //retrieve image from GPU cudaMemcpy( renderCam->image, cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), cudaMemcpyDeviceToHost); //free up stuff, or else we'll leak memory like a madman cudaFree( cudaimage ); cudaFree( cudageoms ); cudaFree( cudamaterials ); delete[] geomList; // geomList was allocated with new[], so it must be released with delete[] // make certain the kernel has completed cudaDeviceSynchronize(); checkCUDAError("Kernel failed!"); time2 = clock(); float execution_time = ((float) (time2 - time1)) / CLOCKS_PER_SEC; printf ("Execution time: %f\n", execution_time); }
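raytraceRay combines the usual three Phong terms with fixed weights (Ka = 0.2, Kd = 0.3, Ks = 0.5) and only adds the diffuse and specular contributions when the shadow ray is unblocked. A self-contained host sketch of that one-light local illumination model; the tiny V3 type replaces glm only to keep the example standalone.

#include <cmath>
#include <cstdio>

struct V3 { float x, y, z; };
float dot(V3 a, V3 b) { return a.x*b.x + a.y*b.y + a.z*b.z; }
V3 scale(V3 a, float s) { return V3{a.x*s, a.y*s, a.z*s}; }
V3 add(V3 a, V3 b) { return V3{a.x+b.x, a.y+b.y, a.z+b.z}; }

// One-light Phong shade with the kernel's weights: ambient plus shadow-gated
// diffuse and specular. n, l, v must be unit vectors; r = 2(n.l)n - l is the
// reflection of the light direction about the normal.
V3 phong(V3 C, V3 n, V3 l, V3 v, float alpha, bool lit)
{
    const float Ka = 0.2f, Kd = 0.3f, Ks = 0.5f;   // weights from raytraceRay
    V3 out = scale(C, Ka);                          // ambient term (white light)
    if (lit) {                                      // isRayUnblocked() analogue
        float nl = std::fmax(dot(n, l), 0.0f);
        V3 r = add(scale(n, 2.0f * dot(n, l)), scale(l, -1.0f));
        float rv = std::pow(std::fmax(dot(r, v), 0.0f), alpha);
        out = add(out, add(scale(C, Kd * nl), V3{Ks * rv, Ks * rv, Ks * rv}));
    }
    return out;
}

int main()
{
    // Red surface, light and eye head-on along the normal.
    V3 c = phong(V3{1, 0, 0}, V3{0, 0, 1}, V3{0, 0, 1}, V3{0, 0, 1}, 32.0f, true);
    printf("rgb = (%.2f, %.2f, %.2f)\n", c.x, c.y, c.z);  // expect (1.00, 0.50, 0.50)
    return 0;
}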
9512a1e064d5312b60977e6c29472dfd2d7b5b4c.hip
// !!! This is a file automatically generated by hipify!!! #include "phong.h" rtDeclareVariable(float3, shading_normal, attribute shading_normal, ); rtDeclareVariable(float3, geometric_normal, attribute geometric_normal, ); // // Transparent object shadows, no textures // rtDeclareVariable(float3, shadow_attenuation, , ); RT_PROGRAM void any_hit_glass() { const float3 world_normal = normalize(rtTransformNormal(RT_OBJECT_TO_WORLD, shading_normal)); const float nDi = fabs(dot(world_normal, ray.direction)); prd_shadow.attenuation *= 1.0f - optix::fresnel_schlick(nDi, 5.0f, 1.0f - shadow_attenuation, make_float3(1.0f)).x; rtIgnoreIntersection(); } // // Glass shader, no textures // rtDeclareVariable(float3, cutoff_color, , ); rtDeclareVariable(float, fresnel_exponent, , ); rtDeclareVariable(float, fresnel_minimum, , ); rtDeclareVariable(float, fresnel_maximum, , ); rtDeclareVariable(float, refraction_index, , ); rtDeclareVariable(float3, refraction_color, , ); rtDeclareVariable(float3, reflection_color, , ); rtDeclareVariable(float3, extinction_constant, , ); rtDeclareVariable(int, use_internal_reflections, , ); RT_PROGRAM void closest_hit_glass() { const float3 h = ray.origin + t_hit * ray.direction; const float3 n = normalize(rtTransformNormal(RT_OBJECT_TO_WORLD, shading_normal)); // normal const float3 &i = ray.direction;// incident direction float reflection = 1.0f; float3 result = make_float3(0.0f); const float3 beer_attenuation = dot(n, ray.direction) > 0.0f ? exp(extinction_constant * t_hit) : make_float3(1.0f); bool inside = false; if(prd_radiance.depth < max_depth) { float3 t; if(refract(t, i, n, refraction_index)) { // check for external or internal reflection float cos_theta = dot(i, n); if(cos_theta < 0.0f) cos_theta = -cos_theta; else { inside = true; cos_theta = dot(t, n); } reflection = fresnel_schlick(cos_theta, fresnel_exponent, fresnel_minimum, fresnel_maximum); const float importance = prd_radiance.importance * (1.0f - reflection) * optix::luminance(refraction_color * beer_attenuation); if(importance > importance_cutoff) { optix::Ray ray(h, t, radiance_ray_type, scene_epsilon); PerRayData_radiance refr_prd; refr_prd.depth = prd_radiance.depth+1; refr_prd.importance = importance; rtTrace(top_object, ray, refr_prd); result += (1.0f - reflection) * refraction_color * refr_prd.result; } else result += (1.0f - reflection) * refraction_color * cutoff_color; } float3 r = reflect(i, n); const float importance = prd_radiance.importance * reflection * optix::luminance(reflection_color * beer_attenuation); if(importance > importance_cutoff && (!inside || (inside && use_internal_reflections))) { const optix::Ray ray(h, r, radiance_ray_type, scene_epsilon); PerRayData_radiance refl_prd; refl_prd.depth = prd_radiance.depth + 1; refl_prd.importance = importance; rtTrace(top_object, ray, refl_prd); result += reflection * reflection_color * refl_prd.result; } else result += reflection * reflection_color * cutoff_color; } result = result * beer_attenuation; prd_radiance.result = result; } rtDeclareVariable(float3, Ka, , ); rtDeclareVariable(float3, Kd, , ); rtDeclareVariable(float3, Ks, , ); rtDeclareVariable(float3, reflectivity, , ); rtTextureSampler<uchar4, 2, hipReadModeNormalizedFloat> diffuse_map; rtTextureSampler<uchar4, 2, hipReadModeNormalizedFloat> specular_map; rtDeclareVariable(float3, texcoord, attribute texcoord, ); // //solid mesh with textures and reflectivity // RT_PROGRAM void closest_hit_mesh() { const float4 pKd = tex2D(diffuse_map, texcoord.x, texcoord.y); 
if(prd_radiance.depth < max_depth && pKd.w < importance_cutoff) { const optix::Ray newray(ray.origin + t_hit * ray.direction, ray.direction, radiance_ray_type, scene_epsilon); prd_radiance.depth++; rtTrace(top_object, newray, prd_radiance); return; } const float3 world_shading_normal = normalize(rtTransformNormal(RT_OBJECT_TO_WORLD, shading_normal)); const float3 world_geometric_normal = normalize(rtTransformNormal(RT_OBJECT_TO_WORLD, geometric_normal)); const float3 ffnormal = faceforward(world_shading_normal, -ray.direction, world_geometric_normal); const float4 pKs = tex2D(specular_map, texcoord.x, texcoord.y); //phongShade(ffnormal, make_float3(0.0f), make_float3(0.0f), make_float3(0.0f), phong_exp, reflectivity); //phongShade(make_float3(abs(ffnormal.x), abs(ffnormal.y), abs(ffnormal.z)), make_float3(0.0f), make_float3(0.0f), make_float3(0.0f), phong_exp, reflectivity); phongShade(make_float3(pKd) * Ka, make_float3(pKd) * Kd, make_float3(pKs) * Ks, ffnormal, pKs.w * 255.0f, reflectivity); } // // Terminates and fully attenuates ray after any hit // RT_PROGRAM void any_hit_solid() { const float opacity = tex2D(diffuse_map, texcoord.x, texcoord.y).w; if(opacity < importance_cutoff) rtIgnoreIntersection(); phongShadowed(); }
9512a1e064d5312b60977e6c29472dfd2d7b5b4c.cu
#include "phong.h" rtDeclareVariable(float3, shading_normal, attribute shading_normal, ); rtDeclareVariable(float3, geometric_normal, attribute geometric_normal, ); // // Transparent object shadows, no textures // rtDeclareVariable(float3, shadow_attenuation, , ); RT_PROGRAM void any_hit_glass() { const float3 world_normal = normalize(rtTransformNormal(RT_OBJECT_TO_WORLD, shading_normal)); const float nDi = fabs(dot(world_normal, ray.direction)); prd_shadow.attenuation *= 1.0f - optix::fresnel_schlick(nDi, 5.0f, 1.0f - shadow_attenuation, make_float3(1.0f)).x; rtIgnoreIntersection(); } // // Glass shader, no textures // rtDeclareVariable(float3, cutoff_color, , ); rtDeclareVariable(float, fresnel_exponent, , ); rtDeclareVariable(float, fresnel_minimum, , ); rtDeclareVariable(float, fresnel_maximum, , ); rtDeclareVariable(float, refraction_index, , ); rtDeclareVariable(float3, refraction_color, , ); rtDeclareVariable(float3, reflection_color, , ); rtDeclareVariable(float3, extinction_constant, , ); rtDeclareVariable(int, use_internal_reflections, , ); RT_PROGRAM void closest_hit_glass() { const float3 h = ray.origin + t_hit * ray.direction; const float3 n = normalize(rtTransformNormal(RT_OBJECT_TO_WORLD, shading_normal)); // normal const float3 &i = ray.direction;// incident direction float reflection = 1.0f; float3 result = make_float3(0.0f); const float3 beer_attenuation = dot(n, ray.direction) > 0.0f ? exp(extinction_constant * t_hit) : make_float3(1.0f); bool inside = false; if(prd_radiance.depth < max_depth) { float3 t; if(refract(t, i, n, refraction_index)) { // check for external or internal reflection float cos_theta = dot(i, n); if(cos_theta < 0.0f) cos_theta = -cos_theta; else { inside = true; cos_theta = dot(t, n); } reflection = fresnel_schlick(cos_theta, fresnel_exponent, fresnel_minimum, fresnel_maximum); const float importance = prd_radiance.importance * (1.0f - reflection) * optix::luminance(refraction_color * beer_attenuation); if(importance > importance_cutoff) { optix::Ray ray(h, t, radiance_ray_type, scene_epsilon); PerRayData_radiance refr_prd; refr_prd.depth = prd_radiance.depth+1; refr_prd.importance = importance; rtTrace(top_object, ray, refr_prd); result += (1.0f - reflection) * refraction_color * refr_prd.result; } else result += (1.0f - reflection) * refraction_color * cutoff_color; } float3 r = reflect(i, n); const float importance = prd_radiance.importance * reflection * optix::luminance(reflection_color * beer_attenuation); if(importance > importance_cutoff && (!inside || (inside && use_internal_reflections))) { const optix::Ray ray(h, r, radiance_ray_type, scene_epsilon); PerRayData_radiance refl_prd; refl_prd.depth = prd_radiance.depth + 1; refl_prd.importance = importance; rtTrace(top_object, ray, refl_prd); result += reflection * reflection_color * refl_prd.result; } else result += reflection * reflection_color * cutoff_color; } result = result * beer_attenuation; prd_radiance.result = result; } rtDeclareVariable(float3, Ka, , ); rtDeclareVariable(float3, Kd, , ); rtDeclareVariable(float3, Ks, , ); rtDeclareVariable(float3, reflectivity, , ); rtTextureSampler<uchar4, 2, cudaReadModeNormalizedFloat> diffuse_map; rtTextureSampler<uchar4, 2, cudaReadModeNormalizedFloat> specular_map; rtDeclareVariable(float3, texcoord, attribute texcoord, ); // //solid mesh with textures and reflectivity // RT_PROGRAM void closest_hit_mesh() { const float4 pKd = tex2D(diffuse_map, texcoord.x, texcoord.y); if(prd_radiance.depth < max_depth && pKd.w < importance_cutoff) { const 
optix::Ray newray(ray.origin + t_hit * ray.direction, ray.direction, radiance_ray_type, scene_epsilon); prd_radiance.depth++; rtTrace(top_object, newray, prd_radiance); return; } const float3 world_shading_normal = normalize(rtTransformNormal(RT_OBJECT_TO_WORLD, shading_normal)); const float3 world_geometric_normal = normalize(rtTransformNormal(RT_OBJECT_TO_WORLD, geometric_normal)); const float3 ffnormal = faceforward(world_shading_normal, -ray.direction, world_geometric_normal); const float4 pKs = tex2D(specular_map, texcoord.x, texcoord.y); //phongShade(ffnormal, make_float3(0.0f), make_float3(0.0f), make_float3(0.0f), phong_exp, reflectivity); //phongShade(make_float3(abs(ffnormal.x), abs(ffnormal.y), abs(ffnormal.z)), make_float3(0.0f), make_float3(0.0f), make_float3(0.0f), phong_exp, reflectivity); phongShade(make_float3(pKd) * Ka, make_float3(pKd) * Kd, make_float3(pKs) * Ks, ffnormal, pKs.w * 255.0f, reflectivity); } // // Terminates and fully attenuates ray after any hit // RT_PROGRAM void any_hit_solid() { const float opacity = tex2D(diffuse_map, texcoord.x, texcoord.y).w; if(opacity < importance_cutoff) rtIgnoreIntersection(); phongShadowed(); }
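Both hit programs above lean on Schlick's approximation of the Fresnel reflectance, F(theta) = F0 + (1 - F0) * (1 - cos theta)^p with p = 5. OptiX's fresnel_schlick additionally interpolates between explicit minimum and maximum values; the scalar sketch below is the textbook form, with f0 = 0.04 as a typical dielectric assumption.

#include <cmath>
#include <cstdio>

// Schlick's approximation: reflectance at incidence angle theta, given the
// normal-incidence reflectance f0. The exponent is conventionally 5.
float fresnel_schlick_scalar(float cos_theta, float f0, float exponent = 5.0f)
{
    return f0 + (1.0f - f0) * std::pow(1.0f - cos_theta, exponent);
}

int main()
{
    const float f0 = 0.04f;  // glass-like dielectric at normal incidence (assumed)
    for (float c = 1.0f; c >= 0.0f; c -= 0.25f)
        printf("cos(theta)=%.2f  F=%.4f\n", c, fresnel_schlick_scalar(c, f0));
    // Reflectance rises from f0 head-on toward 1.0 at grazing angles, which is
    // why the glass shadow attenuation above weakens near silhouettes.
    return 0;
}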
a66307f34f5fbbf7e0b4d5ed4812c621978dd56d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //pass //--gridDim=[32,1,1] --blockDim=[32,1,1] //REQUIRES: hipExtent //REQUIRES: SURFACE #include "common.h" __global__ void d_integrate_trapezoidal(hipExtent extent) { uint x = blockIdx.x*blockDim.x + threadIdx.x; // for higher speed could use hierarchical approach for sum if (x >= extent.width) { return; } float stepsize = 1.0/float(extent.width-1); float to = float(x) * stepsize; float4 outclr = make_float4(0,0,0,0); float incr = stepsize; float4 lastval = tex1D(transferTex,0); float cur = incr; while (cur < to + incr * 0.5) { float4 val = tex1D(transferTex,cur); float4 trapezoid = (lastval+val)/2.0f; lastval = val; outclr += trapezoid; cur += incr; } // surface writes need byte offsets for x! surf1Dwrite(outclr,transferIntegrateSurf,x * sizeof(float4)); }
a66307f34f5fbbf7e0b4d5ed4812c621978dd56d.cu
//pass //--gridDim=[32,1,1] --blockDim=[32,1,1] //REQUIRES: cudaExtent //REQUIRES: SURFACE #include "common.h" __global__ void d_integrate_trapezoidal(cudaExtent extent) { uint x = blockIdx.x*blockDim.x + threadIdx.x; // for higher speed could use hierarchical approach for sum if (x >= extent.width) { return; } float stepsize = 1.0/float(extent.width-1); float to = float(x) * stepsize; float4 outclr = make_float4(0,0,0,0); float incr = stepsize; float4 lastval = tex1D(transferTex,0); float cur = incr; while (cur < to + incr * 0.5) { float4 val = tex1D(transferTex,cur); float4 trapezoid = (lastval+val)/2.0f; lastval = val; outclr += trapezoid; cur += incr; } // surface writes need byte offsets for x! surf1Dwrite(outclr,transferIntegrateSurf,x * sizeof(float4)); }
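Each thread of d_integrate_trapezoidal accumulates the 1-D transfer texture from 0 up to its own sample position as a running sum of averaged pairs (lastval + val)/2; the textbook rule also multiplies each pair by the spacing h, a factor the kernel leaves implicit. A plain host sketch of the cumulative trapezoidal rule with the h factor written out (the integrand and sample count are illustrative):

#include <cstdio>

// Cumulative trapezoidal rule: out[j] = integral of f from x_0 to x_j
// on a uniform grid with spacing h. out[0] = 0 by construction.
void cumtrapz(const float* f, float* out, int n, float h)
{
    out[0] = 0.0f;
    for (int j = 1; j < n; ++j)
        out[j] = out[j - 1] + 0.5f * (f[j - 1] + f[j]) * h;
}

int main()
{
    const int n = 101;
    const float h = 1.0f / (n - 1);   // same spacing as 1/(extent.width-1) above
    float f[n], out[n];
    for (int j = 0; j < n; ++j) {
        float x = j * h;
        f[j] = x * x;                 // integrand: f(x) = x^2
    }
    cumtrapz(f, out, n, h);
    printf("integral of x^2 on [0,1]: %f (exact 1/3)\n", out[n - 1]);
    return 0;
}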
1ec0a57b6fef52a52eecfac130a36f204dbab19b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gradient2d-512-16-256_kernel.hu"
__device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; }

__global__ void kernel0_16(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
  const AN5D_TYPE __c0Len = (timestep - 0);
  const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
  const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
  const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
  const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
  const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
  const AN5D_TYPE __halo1 = 1;
  const AN5D_TYPE __halo2 = 1;
  const AN5D_TYPE __side0Len = 16;
  const AN5D_TYPE __side1Len = 256;
  const AN5D_TYPE __side2Len = 480;
  const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
  const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
  const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
  const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
  const AN5D_TYPE __blockSize = 1 * __side2LenOl;
  const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
  const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
  const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
  const AN5D_TYPE __local_c2 = __tid;
  const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
  const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
  float __reg_0_0; float __reg_0_1; float __reg_0_2;
  float __reg_1_0; float __reg_1_1; float __reg_1_2;
  float __reg_2_0; float __reg_2_1; float __reg_2_2;
  float __reg_3_0; float __reg_3_1; float __reg_3_2;
  float __reg_4_0; float __reg_4_1; float __reg_4_2;
  float __reg_5_0; float __reg_5_1; float __reg_5_2;
  float __reg_6_0; float __reg_6_1; float __reg_6_2;
  float __reg_7_0; float __reg_7_1; float __reg_7_2;
  float __reg_8_0; float __reg_8_1; float __reg_8_2;
  float __reg_9_0; float __reg_9_1; float __reg_9_2;
  float __reg_10_0; float __reg_10_1; float __reg_10_2;
  float __reg_11_0; float __reg_11_1; float __reg_11_2;
  float __reg_12_0; float __reg_12_1; float __reg_12_2;
  float __reg_13_0; float __reg_13_1; float __reg_13_2;
  float __reg_14_0; float __reg_14_1; float __reg_14_2;
  float __reg_15_0; float __reg_15_1; float __reg_15_2;
  __shared__ float __b_sb_double[__blockSize * 2];
  float *__b_sb = __b_sb_double;
  const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
  const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
  const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
  const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
  const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
  const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
  const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
  const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
  const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
  const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
  const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
  const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
  const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
  const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12);
  const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13);
  const AN5D_TYPE __writeValid14 = __updateValid && __local_c2 >= (__halo2 * 14) && __local_c2 < __side2LenOl - (__halo2 * 14);
  const AN5D_TYPE __writeValid15 = __updateValid && __local_c2 >= (__halo2 * 15) && __local_c2 < __side2LenOl - (__halo2 * 15);
  const AN5D_TYPE __writeValid16 = __updateValid && __local_c2 >= (__halo2 * 16) && __local_c2 < __side2LenOl - (__halo2 * 16);
  const AN5D_TYPE __storeValid = __writeValid16;
  AN5D_TYPE __c1;
  AN5D_TYPE __h;
  const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC12(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid12) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC13(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid13) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC14(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid14) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC15(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid15) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
  if (__c1Id == 0)
  {
    __LOAD(__reg_15_0, 0);
    __LOAD(__reg_0_1, 1);
    __LOAD(__reg_0_2, 2);
    __CALC1(__reg_1_1, __reg_15_0, __reg_0_1, __reg_0_2);
    __LOAD(__reg_0_0, 3);
    __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
    __CALC2(__reg_2_1, __reg_15_0, __reg_1_1, __reg_1_2);
    __LOAD(__reg_0_1, 4);
    __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
    __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
    __CALC3(__reg_3_1, __reg_15_0, __reg_2_1, __reg_2_2);
    __LOAD(__reg_0_2, 5);
    __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
    __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
    __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
    __CALC4(__reg_4_1, __reg_15_0, __reg_3_1, __reg_3_2);
    __LOAD(__reg_0_0, 6);
    __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
    __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
    __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
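// (warm-up continues below: one __LOAD per new input row, and one additional
//  __CALCk stage activated per row, until all 16 fused time levels are live)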
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_15_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_15_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_15_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_15_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_15_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_15_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_15_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, 
__reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_15_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_15_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_15_0, __reg_13_1, __reg_13_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_15_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(1, __reg_15_0, __reg_15_1, __reg_15_2); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, 
__reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(2, __reg_15_1, __reg_15_2, __reg_15_0); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(3, __reg_15_2, __reg_15_0, __reg_15_1); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(4, __reg_15_0, __reg_15_1, __reg_15_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(5, __reg_15_1, __reg_15_2, __reg_15_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); 
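// pipeline full: from here each __LOAD of row h retires one __STORE of row h - 16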
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(6, __reg_15_2, __reg_15_0, __reg_15_1); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(7, __reg_15_0, __reg_15_1, __reg_15_2); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(8, __reg_15_1, __reg_15_2, __reg_15_0); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(9, __reg_15_2, __reg_15_0, __reg_15_1); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, 
__reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(10, __reg_15_0, __reg_15_1, __reg_15_2); __LOAD(__reg_0_0, 27); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(11, __reg_15_1, __reg_15_2, __reg_15_0); __LOAD(__reg_0_1, 28); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(12, __reg_15_2, __reg_15_0, __reg_15_1); __LOAD(__reg_0_2, 29); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(13, __reg_15_0, __reg_15_1, __reg_15_2); __LOAD(__reg_0_0, 30); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); 
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(14, __reg_15_1, __reg_15_2, __reg_15_0); __LOAD(__reg_0_1, 31); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(15, __reg_15_2, __reg_15_0, __reg_15_1); __LOAD(__reg_0_2, 32); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(16, __reg_15_0, __reg_15_1, __reg_15_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); 
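// (interior-tile warm-up, __c1Id != 0: the same 16-stage fill, seeded from the
//  tile's 16-row overlap with its upper neighbour rather than a pinned top row)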
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, 
__reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, 
__reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 27); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 28); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __LOAD(__reg_0_2, 29); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); 
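// after warm-up an interior tile stores row 16 once, then the streaming loop
// below processes one row per iteration (__STORE lags __LOAD by 16 rows); the
// variant shown handles the last row-tile (__c1Id == __side1Num - 1)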
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __LOAD(__reg_0_0, 30); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __LOAD(__reg_0_1, 31); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, 32); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(16, __reg_15_0, __reg_15_1, __reg_15_2); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, 
__reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 16, __reg_15_2, __reg_15_0, __reg_15_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 16, __reg_15_0, __reg_15_1, __reg_15_2); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, 
__reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 15, __reg_15_2, __reg_15_0, __reg_15_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 14, __reg_15_0, __reg_15_1, __reg_15_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 13, __reg_15_1, __reg_15_2, __reg_15_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 12, __reg_15_2, __reg_15_0, __reg_15_1); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 11, __reg_15_0, __reg_15_1, __reg_15_2); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 10, __reg_15_1, __reg_15_2, __reg_15_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); 
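// pipeline drain: no rows remain to load, so each leftover stage is flushed
// using the last loaded row (__reg_0_2) as its lower neighbour, ending with
// direct __STOREs of the final output rows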
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 9, __reg_15_2, __reg_15_0, __reg_15_1); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 8, __reg_15_0, __reg_15_1, __reg_15_2); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 7, __reg_15_1, __reg_15_2, __reg_15_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_0_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 6, __reg_15_2, __reg_15_0, __reg_15_1); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_0_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 5, __reg_15_0, __reg_15_1, __reg_15_2); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_0_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 4, __reg_15_1, __reg_15_2, __reg_15_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_0_2); __STORE(__h - 3, __reg_15_2, __reg_15_0, __reg_15_1); __STORE(__h - 2, __reg_15_0, __reg_15_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, 
__reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 15, __reg_15_2, __reg_15_0, __reg_15_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 14, __reg_15_0, __reg_15_1, __reg_15_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 13, __reg_15_1, __reg_15_2, __reg_15_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 12, __reg_15_2, __reg_15_0, __reg_15_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 11, __reg_15_0, __reg_15_1, __reg_15_2); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 10, __reg_15_1, __reg_15_2, __reg_15_0); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, 
__reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 9, __reg_15_2, __reg_15_0, __reg_15_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 8, __reg_15_0, __reg_15_1, __reg_15_2); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 7, __reg_15_1, __reg_15_2, __reg_15_0); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 6, __reg_15_2, __reg_15_0, __reg_15_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_0_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 5, __reg_15_0, __reg_15_1, __reg_15_2); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_0_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 4, __reg_15_1, __reg_15_2, __reg_15_0); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_0_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 3, __reg_15_2, __reg_15_0, __reg_15_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_0_0); __STORE(__h - 2, __reg_15_0, __reg_15_1, __reg_15_2); __STORE(__h - 1, __reg_15_1, __reg_15_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, 
__reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 15, __reg_15_2, __reg_15_0, __reg_15_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 14, __reg_15_0, __reg_15_1, __reg_15_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 13, __reg_15_1, __reg_15_2, __reg_15_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 12, __reg_15_2, __reg_15_0, __reg_15_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, 
__reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 11, __reg_15_0, __reg_15_1, __reg_15_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 10, __reg_15_1, __reg_15_2, __reg_15_0); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 9, __reg_15_2, __reg_15_0, __reg_15_1); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 8, __reg_15_0, __reg_15_1, __reg_15_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 7, __reg_15_1, __reg_15_2, __reg_15_0); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 6, __reg_15_2, __reg_15_0, __reg_15_1); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 5, __reg_15_0, __reg_15_1, __reg_15_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_0_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 4, __reg_15_1, __reg_15_2, __reg_15_0); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_0_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 3, 
__reg_15_2, __reg_15_0, __reg_15_1); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_0_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 2, __reg_15_0, __reg_15_1, __reg_15_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_0_1); __STORE(__h - 1, __reg_15_1, __reg_15_2, __reg_15_0); __STORE(__h + 0, __reg_15_2, __reg_15_0, __reg_0_1); } } else { for (__h = 33; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 16, __reg_15_2, __reg_15_0, __reg_15_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 16, __reg_15_0, __reg_15_1, __reg_15_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, 
__reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 16, __reg_15_2, __reg_15_0, __reg_15_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 16, __reg_15_0, __reg_15_1, __reg_15_2); __h++; } } __global__ void kernel0_15(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 15; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 482; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const 
AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_8_0; float __reg_8_1; float __reg_8_2; float __reg_9_0; float __reg_9_1; float __reg_9_2; float __reg_10_0; float __reg_10_1; float __reg_10_2; float __reg_11_0; float __reg_11_1; float __reg_11_2; float __reg_12_0; float __reg_12_1; float __reg_12_2; float __reg_13_0; float __reg_13_1; float __reg_13_2; float __reg_14_0; float __reg_14_1; float __reg_14_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11); const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12); const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13); const AN5D_TYPE __writeValid14 = __updateValid && __local_c2 >= (__halo2 * 14) && __local_c2 < __side2LenOl - (__halo2 * 14); const AN5D_TYPE __writeValid15 = __updateValid && __local_c2 >= (__halo2 * 15) && __local_c2 < __side2LenOl - (__halo2 * 15); const AN5D_TYPE __storeValid = __writeValid15; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 
0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC12(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid12) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC13(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid13) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC14(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid14) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_14_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_14_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_14_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_14_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); 
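// Warm-up for the first block along c1 (__c1Id == 0): each __LOAD pushes
// data one stage deeper through __CALC1..__CALC14, and __reg_14_0 keeps the
// c1 = 0 boundary row that seeds the first invocation of every stage, so
// no row can be __STOREd until all 14 register stages hold valid values.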
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_14_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_14_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_14_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_14_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_14_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_14_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_14_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_14_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, 
__reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_14_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_14_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_14_0, __reg_13_1, __reg_13_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(1, __reg_14_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(2, __reg_14_1, __reg_14_2, __reg_14_0); 
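// The pipeline is now full: from here each additional __LOAD of one input
// row lets the kernel emit exactly one output row via __STORE, i.e. one row
// advanced by all 15 fused time steps (__side0Len = 15: __CALC1..__CALC14
// plus the final __CALCEXPR inside __STORE).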
__LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(3, __reg_14_2, __reg_14_0, __reg_14_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(4, __reg_14_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(5, __reg_14_1, __reg_14_2, __reg_14_0); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(6, __reg_14_2, __reg_14_0, __reg_14_1); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, 
__reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(7, __reg_14_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(8, __reg_14_1, __reg_14_2, __reg_14_0); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(9, __reg_14_2, __reg_14_0, __reg_14_1); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(10, __reg_14_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, 
__reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(11, __reg_14_1, __reg_14_2, __reg_14_0); __LOAD(__reg_0_0, 27); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(12, __reg_14_2, __reg_14_0, __reg_14_1); __LOAD(__reg_0_1, 28); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(13, __reg_14_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, 29); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(14, __reg_14_1, __reg_14_2, __reg_14_0); __LOAD(__reg_0_0, 30); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, 
__reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(15, __reg_14_2, __reg_14_0, __reg_14_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 
16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); 
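// Interior blocks (__c1Id != 0) take this branch: they redundantly
// recompute the overlap rows (__OlLen1 = 15 on each side) and defer every
// __STORE until the whole pipeline has been refilled from rows this block
// loaded itself, the usual redundant-computation trade-off of overlapped
// temporal tiling.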
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 27); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 28); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); 
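// Throughout these stages __b_sb is double-buffered: __CALCSETUP flips the
// buffer via __DB_SWITCH, publishes the centre value, then issues a single
// __syncthreads() before the c2 neighbours are read. Presumably one barrier
// per stage suffices because writes never target the buffer that the
// previous stage was still reading from.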
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __LOAD(__reg_0_2, 29); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __LOAD(__reg_0_0, 30); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(15, __reg_14_2, __reg_14_0, __reg_14_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 31; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); 
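// Steady-state loop for the last block along c1: unrolled by 3 so the
// rotating register triples (__reg_k_0/1/2) return to their starting
// alignment each iteration; every row loaded at index __h flushes finished
// output row __h - 15 from the bottom of the pipeline.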
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 15, __reg_14_1, __reg_14_2, __reg_14_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 15, __reg_14_2, __reg_14_0, __reg_14_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 14, __reg_14_1, __reg_14_2, __reg_14_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); 
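// Pipeline drain at the end of the c1 range: no further rows exist to load,
// so each remaining stage is re-fed the last loaded row (__reg_0_0 in this
// case) as its bottom neighbour until all buffered rows are flushed. The
// empty "if (0) {}" guard appears to be an artifact of automatic code
// generation.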
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 13, __reg_14_2, __reg_14_0, __reg_14_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 12, __reg_14_0, __reg_14_1, __reg_14_2); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 11, __reg_14_1, __reg_14_2, __reg_14_0); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 10, __reg_14_2, __reg_14_0, __reg_14_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 9, __reg_14_0, __reg_14_1, __reg_14_2); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 8, __reg_14_1, __reg_14_2, __reg_14_0); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 7, __reg_14_2, __reg_14_0, __reg_14_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 6, __reg_14_0, __reg_14_1, __reg_14_2); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_0_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 5, 
__reg_14_1, __reg_14_2, __reg_14_0); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_0_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 4, __reg_14_2, __reg_14_0, __reg_14_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_0_0); __STORE(__h - 3, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 2, __reg_14_1, __reg_14_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 14, __reg_14_1, __reg_14_2, __reg_14_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 13, __reg_14_2, __reg_14_0, __reg_14_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 12, __reg_14_0, __reg_14_1, __reg_14_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, 
__reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 11, __reg_14_1, __reg_14_2, __reg_14_0); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 10, __reg_14_2, __reg_14_0, __reg_14_1); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 9, __reg_14_0, __reg_14_1, __reg_14_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 8, __reg_14_1, __reg_14_2, __reg_14_0); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 7, __reg_14_2, __reg_14_0, __reg_14_1); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 6, __reg_14_0, __reg_14_1, __reg_14_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 5, __reg_14_1, __reg_14_2, __reg_14_0); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_0_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 4, __reg_14_2, __reg_14_0, __reg_14_1); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_0_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 3, __reg_14_0, __reg_14_1, __reg_14_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_0_1); __STORE(__h - 2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 1, __reg_14_2, __reg_14_0, 
__reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 14, __reg_14_1, __reg_14_2, __reg_14_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 13, __reg_14_2, __reg_14_0, __reg_14_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 12, __reg_14_0, __reg_14_1, __reg_14_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, 
__reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 11, __reg_14_1, __reg_14_2, __reg_14_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 10, __reg_14_2, __reg_14_0, __reg_14_1); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 9, __reg_14_0, __reg_14_1, __reg_14_2); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 8, __reg_14_1, __reg_14_2, __reg_14_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 7, __reg_14_2, __reg_14_0, __reg_14_1); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 6, __reg_14_0, __reg_14_1, __reg_14_2); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 5, __reg_14_1, __reg_14_2, __reg_14_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 4, __reg_14_2, __reg_14_0, __reg_14_1); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_0_2); 
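/* __h + 2 case: after the two final __LOADs above, the flush rounds keep reusing __reg_0_2 (the last row read) until every pending output through __STORE(__h + 0) has been written. */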
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 3, __reg_14_0, __reg_14_1, __reg_14_2); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_0_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 2, __reg_14_1, __reg_14_2, __reg_14_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_0_2); __STORE(__h - 1, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h + 0, __reg_14_0, __reg_14_1, __reg_0_2); } } else { for (__h = 31; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 15, __reg_14_1, __reg_14_2, __reg_14_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 15, __reg_14_2, __reg_14_0, __reg_14_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, 
__reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 15, __reg_14_1, __reg_14_2, __reg_14_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 15, __reg_14_2, __reg_14_0, __reg_14_1); __h++; } } __global__ void kernel0_14(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 14; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 484; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float 
__reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_8_0; float __reg_8_1; float __reg_8_2; float __reg_9_0; float __reg_9_1; float __reg_9_2; float __reg_10_0; float __reg_10_1; float __reg_10_2; float __reg_11_0; float __reg_11_1; float __reg_11_2; float __reg_12_0; float __reg_12_1; float __reg_12_2; float __reg_13_0; float __reg_13_1; float __reg_13_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11); const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12); const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13); const AN5D_TYPE __writeValid14 = __updateValid && __local_c2 >= (__halo2 * 14) && __local_c2 < __side2LenOl - (__halo2 * 14); const AN5D_TYPE __storeValid = __writeValid14; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } 
while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC12(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid12) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC13(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid13) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_13_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_13_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_13_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_13_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_13_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_13_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, 
__reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_13_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_13_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_13_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_13_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_13_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_13_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_13_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, 
__reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_13_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(1, __reg_13_0, __reg_13_1, __reg_13_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(2, __reg_13_1, __reg_13_2, __reg_13_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(3, __reg_13_2, __reg_13_0, __reg_13_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, 
__reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(4, __reg_13_0, __reg_13_1, __reg_13_2); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(5, __reg_13_1, __reg_13_2, __reg_13_0); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(6, __reg_13_2, __reg_13_0, __reg_13_1); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(7, __reg_13_0, __reg_13_1, __reg_13_2); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(8, __reg_13_1, __reg_13_2, __reg_13_0); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); 
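/* First-block (__c1Id == 0) warm-up continues: one __LOAD per round drives all 13 register-pipeline stages, and from row 15 onward each round also stores one output (rows 1..14). */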
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(9, __reg_13_2, __reg_13_0, __reg_13_1); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(10, __reg_13_0, __reg_13_1, __reg_13_2); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(11, __reg_13_1, __reg_13_2, __reg_13_0); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(12, __reg_13_2, __reg_13_0, __reg_13_1); __LOAD(__reg_0_0, 27); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); 
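/* The rounds for rows 27 and 28 store outputs 13 and 14, completing the first block's warm-up; blocks with __c1Id != 0 take the else branch below and warm up without these early stores. */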
__STORE(13, __reg_13_0, __reg_13_1, __reg_13_2); __LOAD(__reg_0_1, 28); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(14, __reg_13_1, __reg_13_2, __reg_13_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, 
__reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, 
__reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 27); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); 
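/* Interior-block warm-up: rows are computed but not stored until __STORE(14, ...) at the end of this branch; the earlier rows fall inside the 14-row overlap region and are not this block's output. */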
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 28); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(14, __reg_13_1, __reg_13_2, __reg_13_0); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 14, __reg_13_0, __reg_13_1, __reg_13_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); 
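/* Last-block (__c1Id == __side1Num - 1) steady loop: three load/compute/store rounds per iteration, each storing output row __h - 14 before __h advances, until the pipeline must be flushed at the lower boundary. */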
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 14, __reg_13_1, __reg_13_2, __reg_13_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 13, __reg_13_0, __reg_13_1, __reg_13_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 12, __reg_13_1, __reg_13_2, __reg_13_0); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 11, __reg_13_2, __reg_13_0, __reg_13_1); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 10, __reg_13_0, __reg_13_1, __reg_13_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); 
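/* Exact-fit flush (__h + 0) for this 14-stage kernel: the remaining rounds reuse the boundary row __reg_0_1, storing outputs __h - 9 through __h - 2 below. */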
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 9, __reg_13_1, __reg_13_2, __reg_13_0); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 8, __reg_13_2, __reg_13_0, __reg_13_1); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 7, __reg_13_0, __reg_13_1, __reg_13_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 6, __reg_13_1, __reg_13_2, __reg_13_0); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 5, __reg_13_2, __reg_13_0, __reg_13_1); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_0_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 4, __reg_13_0, __reg_13_1, __reg_13_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_0_1); __STORE(__h - 3, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 2, __reg_13_2, __reg_13_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 13, __reg_13_0, __reg_13_1, __reg_13_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, 
__reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 12, __reg_13_1, __reg_13_2, __reg_13_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 11, __reg_13_2, __reg_13_0, __reg_13_1); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 10, __reg_13_0, __reg_13_1, __reg_13_2); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 9, __reg_13_1, __reg_13_2, __reg_13_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 8, __reg_13_2, __reg_13_0, __reg_13_1); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 7, __reg_13_0, __reg_13_1, __reg_13_2); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 6, __reg_13_1, __reg_13_2, __reg_13_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, 
__reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 5, __reg_13_2, __reg_13_0, __reg_13_1); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 4, __reg_13_0, __reg_13_1, __reg_13_2); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_0_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 3, __reg_13_1, __reg_13_2, __reg_13_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_0_2); __STORE(__h - 2, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 1, __reg_13_0, __reg_13_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 13, __reg_13_0, __reg_13_1, __reg_13_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 12, __reg_13_1, __reg_13_2, __reg_13_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, 
__reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 11, __reg_13_2, __reg_13_0, __reg_13_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 10, __reg_13_0, __reg_13_1, __reg_13_2); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 9, __reg_13_1, __reg_13_2, __reg_13_0); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 8, __reg_13_2, __reg_13_0, __reg_13_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 7, __reg_13_0, __reg_13_1, __reg_13_2); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 6, __reg_13_1, __reg_13_2, __reg_13_0); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 5, __reg_13_2, __reg_13_0, __reg_13_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 4, __reg_13_0, __reg_13_1, __reg_13_2); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 3, __reg_13_1, __reg_13_2, __reg_13_0); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_0_0); __CALC13(__reg_13_1, 
__reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 2, __reg_13_2, __reg_13_0, __reg_13_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_0_0); __STORE(__h - 1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h + 0, __reg_13_1, __reg_13_2, __reg_0_0); } } else { for (__h = 29; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 14, __reg_13_0, __reg_13_1, __reg_13_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 14, __reg_13_1, __reg_13_2, __reg_13_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1); __h++; if (__h == __side1LenOl) return; 
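/* Pipeline drain for blocks that do not own the last tile along c1: each
   leftover row makes one more pass through the full register pipeline
   (__CALC1..__CALC13 plus the final __STORE, which applies the stencil
   expression a 14th time), and the kernel returns as soon as all
   __side1LenOl rows of the overlapped tile have been consumed. */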
__LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 14, __reg_13_0, __reg_13_1, __reg_13_2); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 14, __reg_13_1, __reg_13_2, __reg_13_0); __h++;
}
}

/* 13-time-step variant of the same stencil: identical register-pipeline
   structure as the kernel above, one stage shorter (__CALC1..__CALC12 plus a
   final __STORE that applies the stencil expression once more). The
   preprocessor directives below must each sit on their own line. */
__global__ void kernel0_13(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
  /* Tile geometry: 13 fused time steps (__side0Len); each block covers
     __side1Len rows by __side2Len columns, extended on every side by the
     overlap (__OlLen1/__OlLen2) that the fused steps consume. */
  const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
  const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
  const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
  const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1;
  const AN5D_TYPE __side0Len = 13;
  const AN5D_TYPE __side1Len = 256;
  const AN5D_TYPE __side2Len = 486;
  const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
  const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
  const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
  const AN5D_TYPE __blockSize = 1 * __side2LenOl;
  const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
  const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
  const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
  const AN5D_TYPE __local_c2 = __tid;
  const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
  const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
  /* One rotating triple of row registers per pipeline stage. */
  float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_8_0; float __reg_8_1; float __reg_8_2; float __reg_9_0; float __reg_9_1; float __reg_9_2; float __reg_10_0; float __reg_10_1; float __reg_10_2; float __reg_11_0; float __reg_11_1; float __reg_11_2; float __reg_12_0; float __reg_12_1; float __reg_12_2;
  /* Double-buffered shared-memory row: one half is read while the next stage publishes into the other. */
  __shared__ float __b_sb_double[__blockSize * 2];
  float *__b_sb = __b_sb_double;
  /* The writable column range shrinks by one halo per fused time step. */
  const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
  const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
  const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
  const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
  const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
  const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
  const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
  const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
  const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
  const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
  const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
  const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
  const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
  const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12);
  const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13);
  const AN5D_TYPE __storeValid = __writeValid13;
  AN5D_TYPE __c1;
  AN5D_TYPE __h;
  const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
/* Stencil body: center value plus 1/sqrt(0.0001 + sum of squared differences
   against the four neighbors -- rows above/below come from registers,
   left/right neighbors come from the shared-memory row. */
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC12(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid12) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
/* Warm-up: the first block along c1 pins boundary row 0 in __reg_12_0 and
   streams the following rows through the progressively deepening pipeline. */
if (__c1Id == 0)
{
__LOAD(__reg_12_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_12_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_12_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_12_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_12_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_12_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_12_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_12_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_12_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_12_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_12_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_12_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_12_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); 
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(1, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(2, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(3, __reg_12_2, __reg_12_0, __reg_12_1); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(4, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(5, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); 
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(6, __reg_12_2, __reg_12_0, __reg_12_1); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(7, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(8, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(9, __reg_12_2, __reg_12_0, __reg_12_1); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(10, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); 
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(11, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(12, __reg_12_2, __reg_12_0, __reg_12_1); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(13, __reg_12_0, __reg_12_1, __reg_12_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, 
__reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); 
__LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, 
__reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(13, __reg_12_0, __reg_12_1, __reg_12_2); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 27; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 13, __reg_12_2, __reg_12_0, __reg_12_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 13, __reg_12_0, __reg_12_1, __reg_12_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, 
__reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 12, __reg_12_2, __reg_12_0, __reg_12_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 11, __reg_12_0, __reg_12_1, __reg_12_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 10, __reg_12_1, __reg_12_2, __reg_12_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 9, __reg_12_2, __reg_12_0, __reg_12_1); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 8, __reg_12_0, __reg_12_1, __reg_12_2); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 7, __reg_12_1, __reg_12_2, __reg_12_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 6, __reg_12_2, __reg_12_0, __reg_12_1); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2); __CALC11(__reg_11_0, 
__reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 5, __reg_12_0, __reg_12_1, __reg_12_2); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 4, __reg_12_1, __reg_12_2, __reg_12_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_0_2); __STORE(__h - 3, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 2, __reg_12_0, __reg_12_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 12, __reg_12_2, __reg_12_0, __reg_12_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 11, __reg_12_0, __reg_12_1, __reg_12_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 10, __reg_12_1, __reg_12_2, __reg_12_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 9, __reg_12_2, __reg_12_0, 
__reg_12_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 8, __reg_12_0, __reg_12_1, __reg_12_2); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 7, __reg_12_1, __reg_12_2, __reg_12_0); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 6, __reg_12_2, __reg_12_0, __reg_12_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 5, __reg_12_0, __reg_12_1, __reg_12_2); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 4, __reg_12_1, __reg_12_2, __reg_12_0); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 3, __reg_12_2, __reg_12_0, __reg_12_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_0_0); __STORE(__h - 2, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 1, __reg_12_1, __reg_12_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 12, __reg_12_2, __reg_12_0, __reg_12_1); __CALC2(__reg_2_0, 
__reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 11, __reg_12_0, __reg_12_1, __reg_12_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 10, __reg_12_1, __reg_12_2, __reg_12_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 9, __reg_12_2, __reg_12_0, __reg_12_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 8, __reg_12_0, __reg_12_1, __reg_12_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 7, __reg_12_1, __reg_12_2, __reg_12_0); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 6, __reg_12_2, __reg_12_0, __reg_12_1); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 5, __reg_12_0, __reg_12_1, __reg_12_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, 
__reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 4, __reg_12_1, __reg_12_2, __reg_12_0); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 3, __reg_12_2, __reg_12_0, __reg_12_1); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 2, __reg_12_0, __reg_12_1, __reg_12_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_0_1); __STORE(__h - 1, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h + 0, __reg_12_2, __reg_12_0, __reg_0_1); } } else { for (__h = 27; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 13, __reg_12_2, __reg_12_0, __reg_12_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 13, __reg_12_0, __reg_12_1, __reg_12_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, 
__reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 13, __reg_12_2, __reg_12_0, __reg_12_1); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 13, __reg_12_0, __reg_12_1, __reg_12_2); __h++;
}
}
// kernel0_12: same register-pipelined stencil as the kernel above, one fused time
// step fewer (__side0Len = 12: 11 in-register __CALC stages plus the __STORE step).
__global__ void kernel0_12(float *A, int dimsize, int timestep, int c0) {
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 12; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0; float __reg_0_1; float __reg_0_2;
float __reg_1_0; float __reg_1_1; float __reg_1_2;
float __reg_2_0; float __reg_2_1; float __reg_2_2;
float __reg_3_0; float __reg_3_1; float __reg_3_2;
float __reg_4_0; float __reg_4_1; float __reg_4_2;
float __reg_5_0; float __reg_5_1; float __reg_5_2;
float __reg_6_0; float __reg_6_1; float __reg_6_2;
float __reg_7_0; float __reg_7_1; float __reg_7_2;
float __reg_8_0; float __reg_8_1; float __reg_8_2;
float __reg_9_0; float __reg_9_1; float __reg_9_2;
float __reg_10_0; float __reg_10_1; float __reg_10_2;
float __reg_11_0; float __reg_11_1;
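/* Added note (not part of the generated source): each __reg_<s>_<i> scalar
   declared around this comment is one slot of pipeline stage s's rotating
   3-row window; stage s carries this thread's column advanced s time steps,
   and values retire from stage 11 into the __STORE step (logical step 12).
   A host-side launch consistent with the constants above would presumably
   look like the following hypothetical sketch (one block per (c1, c2) tile,
   __side2LenOl = 488 + 2*12 = 512 threads per block; both hip-clang and nvcc
   accept the <<<...>>> launch form):

       dim3 grid(((dimsize - 2) + 255) / 256 * (((dimsize - 2) + 487) / 488));
       dim3 block(512);
       kernel0_12<<<grid, block>>>(A, dimsize, timestep, c0);
*/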
float __reg_11_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
// Per-stage write masks: stage s may only update columns at least s halo cells inside the overlapped tile.
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12);
const AN5D_TYPE __storeValid = __writeValid12;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
// Point update: b + 1 / sqrt(1e-4f + (b-a)^2 + (b-c)^2 + (b-east)^2 + (b-west)^2),
// where a/c are the rows above/below (held in registers) and east/west are read
// from the shared-memory row buffer __b_sb.
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
// Pipeline priming for the top tile: rows 0..24 are streamed in and pushed
// through the stages, emitting output rows 1..12 before the steady-state
// loops (which start at __h = 25) take over.
if (__c1Id == 0) {
__LOAD(__reg_11_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_11_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_11_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_11_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_11_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_11_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_11_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_11_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_11_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_11_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_11_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_11_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(1, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); 
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(2, __reg_11_1, __reg_11_2, __reg_11_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(3, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(4, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(5, __reg_11_1, __reg_11_2, __reg_11_0); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(6, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(7, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); 
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(8, __reg_11_1, __reg_11_2, __reg_11_0); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(9, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(10, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(11, __reg_11_1, __reg_11_2, __reg_11_0); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(12, __reg_11_2, __reg_11_0, __reg_11_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, 
__reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); 
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); 
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(12, __reg_11_2, __reg_11_0, __reg_11_1); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 12, __reg_11_1, __reg_11_2, __reg_11_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 12, __reg_11_2, __reg_11_0, __reg_11_1); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, 
__reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 11, __reg_11_1, __reg_11_2, __reg_11_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 10, __reg_11_2, __reg_11_0, __reg_11_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 9, __reg_11_0, __reg_11_1, __reg_11_2); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 8, __reg_11_1, __reg_11_2, __reg_11_0); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 7, __reg_11_2, __reg_11_0, __reg_11_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 6, __reg_11_0, __reg_11_1, __reg_11_2); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 5, __reg_11_1, __reg_11_2, __reg_11_0); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 4, __reg_11_2, __reg_11_0, __reg_11_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0); __STORE(__h - 3, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 2, __reg_11_1, __reg_11_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, 
__reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 11, __reg_11_1, __reg_11_2, __reg_11_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 10, __reg_11_2, __reg_11_0, __reg_11_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 9, __reg_11_0, __reg_11_1, __reg_11_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 8, __reg_11_1, __reg_11_2, __reg_11_0); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 7, __reg_11_2, __reg_11_0, __reg_11_1); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 6, __reg_11_0, __reg_11_1, __reg_11_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 5, __reg_11_1, __reg_11_2, __reg_11_0); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 4, __reg_11_2, __reg_11_0, __reg_11_1); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, 
__reg_0_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 3, __reg_11_0, __reg_11_1, __reg_11_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1); __STORE(__h - 2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 1, __reg_11_2, __reg_11_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 11, __reg_11_1, __reg_11_2, __reg_11_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 10, __reg_11_2, __reg_11_0, __reg_11_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 9, __reg_11_0, __reg_11_1, __reg_11_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 8, __reg_11_1, __reg_11_2, __reg_11_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, 
__reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 7, __reg_11_2, __reg_11_0, __reg_11_1); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 6, __reg_11_0, __reg_11_1, __reg_11_2); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 5, __reg_11_1, __reg_11_2, __reg_11_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 4, __reg_11_2, __reg_11_0, __reg_11_1); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 3, __reg_11_0, __reg_11_1, __reg_11_2); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2); __STORE(__h - 1, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h + 0, __reg_11_0, __reg_11_1, __reg_0_2); } } else { for (__h = 25; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 12, __reg_11_1, __reg_11_2, __reg_11_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, 
__reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 12, __reg_11_2, __reg_11_0, __reg_11_1); __h++;
}
}
// kernel0_11: 11-step variant of the same register pipeline (10 in-register
// __CALC stages plus the __STORE step); __side2Len is slightly wider because
// fewer fused steps need less halo overlap per side.
__global__ void kernel0_11(float *A, int dimsize, int timestep, int c0) {
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 11; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 490;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x %
__side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_8_0; float __reg_8_1; float __reg_8_2; float __reg_9_0; float __reg_9_1; float __reg_9_2; float __reg_10_0; float __reg_10_1; float __reg_10_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11); const AN5D_TYPE __storeValid = __writeValid11; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_10_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_10_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_10_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_10_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_10_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_10_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_10_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); 
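/* Pipeline fill for the first block row (__c1Id == 0): input rows 0..12 are
   loaded and pushed through the 11 fused time steps (__CALC1..__CALC11,
   __side0Len = 11) before the first output is ready; __STORE(1, ...) through
   __STORE(11, ...) then emit the warm-up rows owned by this block row. */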
__CALC7(__reg_7_1, __reg_10_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_10_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_10_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_10_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(1, __reg_10_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(2, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(3, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); 
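/* Each __CALCk advances its operands by one time step of the nonlinear
   5-point stencil defined by __CALCEXPR:
     out = b + 1.0f / sqrt(0.0001f + (b-a)^2 + (b-c)^2 + (b-n)^2 + (b-s)^2)
   where a and c are the rows above/below (kept in registers) and n, s are the
   lateral neighbours fetched from the shared-memory row buffer __b_sb. */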
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(4, __reg_10_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(5, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(6, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(7, __reg_10_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(8, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(9, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, 
__reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(10, __reg_10_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(11, __reg_10_1, __reg_10_2, __reg_10_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, 
__reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, 
__reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(11, __reg_10_1, __reg_10_2, __reg_10_0); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 23; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 11, __reg_10_0, __reg_10_1, __reg_10_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 11, __reg_10_1, __reg_10_2, __reg_10_0); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, 
__reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 10, __reg_10_0, __reg_10_1, __reg_10_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 9, __reg_10_1, __reg_10_2, __reg_10_0); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 8, __reg_10_2, __reg_10_0, __reg_10_1); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 7, __reg_10_0, __reg_10_1, __reg_10_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 6, __reg_10_1, __reg_10_2, __reg_10_0); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 5, __reg_10_2, __reg_10_0, __reg_10_1); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 4, __reg_10_0, __reg_10_1, __reg_10_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1); __STORE(__h - 3, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 2, __reg_10_2, __reg_10_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, 
__reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 10, __reg_10_0, __reg_10_1, __reg_10_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 9, __reg_10_1, __reg_10_2, __reg_10_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 8, __reg_10_2, __reg_10_0, __reg_10_1); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 7, __reg_10_0, __reg_10_1, __reg_10_2); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 6, __reg_10_1, __reg_10_2, __reg_10_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 5, __reg_10_2, __reg_10_0, __reg_10_1); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 4, __reg_10_0, __reg_10_1, __reg_10_2); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 3, __reg_10_1, __reg_10_2, __reg_10_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2); __STORE(__h - 2, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 1, __reg_10_0, __reg_10_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, 
__reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 10, __reg_10_0, __reg_10_1, __reg_10_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 9, __reg_10_1, __reg_10_2, __reg_10_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 8, __reg_10_2, __reg_10_0, __reg_10_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 7, __reg_10_0, __reg_10_1, __reg_10_2); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 6, __reg_10_1, __reg_10_2, __reg_10_0); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 5, __reg_10_2, __reg_10_0, __reg_10_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 4, __reg_10_0, __reg_10_1, __reg_10_2); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 3, __reg_10_1, __reg_10_2, __reg_10_0); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 2, __reg_10_2, __reg_10_0, __reg_10_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0); __STORE(__h - 1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h + 0, __reg_10_1, __reg_10_2, __reg_0_0); } } else { for (__h = 23; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, 
__reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 11, __reg_10_0, __reg_10_1, __reg_10_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 11, __reg_10_1, __reg_10_2, __reg_10_0); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 11, __reg_10_0, __reg_10_1, __reg_10_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 11, __reg_10_1, 
__reg_10_2, __reg_10_0); __h++; } } __global__ void kernel0_10(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 10; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 492; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_8_0; float __reg_8_1; float __reg_8_2; float __reg_9_0; float __reg_9_1; float __reg_9_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __storeValid = __writeValid10; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST 
(A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_9_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_9_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_9_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_9_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_9_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_9_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, 
__reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_9_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_9_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_9_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_9_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(3, __reg_9_2, __reg_9_0, __reg_9_1); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, 
__reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(4, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(5, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(6, __reg_9_2, __reg_9_0, __reg_9_1); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(7, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(8, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(9, __reg_9_2, __reg_9_0, __reg_9_1); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(10, __reg_9_0, __reg_9_1, __reg_9_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, 
__reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, 
__reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(10, __reg_9_0, __reg_9_1, __reg_9_2); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); 
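/* Tail handling for the last block row (__c1Id == __side1Num - 1): the
   surrounding 3-way unrolled loop retires three output rows per iteration;
   once it exits, the (__h + k == __c1Len - __side1Len * __c1Id + __halo1 * 2)
   cases drain the remaining pipeline stages at the lower tile edge, reusing
   the last loaded input row in place of the missing bottom halo. */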
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, 
__reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, 
__reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h + 0, __reg_9_2, __reg_9_0, __reg_0_1); } } else { for (__h = 21; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, 
__reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2); __h++; } }

/* kernel0_9: the same stencil fused over 9 time steps per launch. Each block
   streams a 256-row tile (c1) of 494 interior columns (c2), with 9 overlap
   rows/columns per side (one per fused step). */
__global__ void kernel0_9(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 494;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
/* __reg_s_r: value after s stencil applications (s == 0 is raw input); the
   three slots r per stage rotate through the 3-point window along c1 */
float __reg_0_0; float __reg_0_1; float __reg_0_2;
float __reg_1_0; float __reg_1_1; float __reg_1_2;
float __reg_2_0; float __reg_2_1; float __reg_2_2;
float __reg_3_0; float __reg_3_1; float __reg_3_2;
float __reg_4_0; float __reg_4_1; float __reg_4_2;
float __reg_5_0; float __reg_5_1; float __reg_5_2;
float __reg_6_0; float __reg_6_1; float __reg_6_2;
float __reg_7_0; float __reg_7_1; float __reg_7_2;
float __reg_8_0; float __reg_8_1; float __reg_8_2;
/* double-buffered shared row: each pipeline stage publishes its centre value
   here so neighbouring threads can read the c2-direction neighbours */
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __storeValid = __writeValid9;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
/* one stencil application: b + 1 / sqrt(0.0001 + sum of squared differences
   to the four neighbours); a and c are the c1 neighbours held in registers,
   the c2 neighbours are read through the shared-memory row */
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
/* __CALCn applies stage n where its write guard holds; outside the guard it
   only shifts the register (out = reg1) so halo threads stay consistent */
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
/* __STORE runs the 9th and final stage and writes into the (c0 + 1) % 2
   plane of the ping-pong buffer A */
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
/* first tile along c1: row 0 is the fixed boundary row, so its loaded value
   (__reg_8_0) is a valid upper neighbour at every pipeline stage */
if (__c1Id == 0)
{
__LOAD(__reg_8_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_8_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_8_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_8_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_8_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_8_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_8_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_8_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
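/* warm-up continues: each newly loaded row is pushed one stage deeper; the
   first __STORE (output row 1) fires once row 10 is in flight, after which
   one output row is retired per loaded input row */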
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_8_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(2, __reg_8_1, __reg_8_2, __reg_8_0); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(3, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(4, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(5, __reg_8_1, __reg_8_2, __reg_8_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(6, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, 
__reg_7_0); __STORE(7, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(8, __reg_8_1, __reg_8_2, __reg_8_0); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(9, __reg_8_2, __reg_8_0, __reg_8_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, 
__reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(9, __reg_8_2, __reg_8_0, __reg_8_1); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 19; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); 
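/* __CALC4..__CALC8 plus the __STORE below finish the third unrolled step of
   the steady-state loop; the else-if chain after the loop drains the 9-stage
   pipeline when the tile runs out of input rows (0, 1 or 2 rows left) */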
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, 
__reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 1, __reg_8_2, __reg_8_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, 
__reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __STORE(__h - 1, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h + 0, __reg_8_0, __reg_8_1, __reg_0_2); } } else { for (__h = 19; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, 
__reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1); __h++; } }

/* kernel0_8: the same stencil fused over 8 time steps, one pipeline stage
   (and two overlap rows/columns) fewer than kernel0_9, with a 496-column
   tile; the structure below mirrors kernel0_9 one stage shorter */
__global__ void kernel0_8(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0; float __reg_0_1; float __reg_0_2;
float __reg_1_0; float __reg_1_1; float __reg_1_2;
float __reg_2_0; float __reg_2_1; float __reg_2_2;
float __reg_3_0; float __reg_3_1; float __reg_3_2;
float __reg_4_0; float __reg_4_1; float __reg_4_2;
float __reg_5_0; float __reg_5_1; float __reg_5_2;
float __reg_6_0; float __reg_6_1; float __reg_6_2;
float __reg_7_0; float __reg_7_1; float __reg_7_2;
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __storeValid = __writeValid8;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
/* first tile: boundary row 0 (__reg_7_0) again feeds every stage */
if (__c1Id == 0)
{
__LOAD(__reg_7_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_7_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_7_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_7_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_7_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_7_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_7_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2,
__reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_7_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(1, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(3, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(4, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(5, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(6, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(7, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, 
__reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(8, __reg_7_1, __reg_7_2, __reg_7_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); 
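/* rows 0..16 (8 halo rows on each side of row 8) prime the 8-stage pipeline
   for interior tiles; __STORE(8, ...) just below emits this block's first
   output row */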
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(8, __reg_7_1, __reg_7_2, __reg_7_0); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { 
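/* exactly one input row remains in this tile: load it, then flush the
   pipeline with progressively shorter __CALC chains, one output row each */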
__LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, 
__reg_7_0, __reg_7_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_0_0); } } else { for (__h = 17; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, 
__reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++;
  }
}

// kernel0_7: same stencil, seven fused time steps per sweep (__side0Len = 7).
// Each thread streams rows through a seven-stage register pipeline
// (__CALC1..__CALC6 feed the final __STORE); each stage mirrors its centre row
// to shared memory so neighbouring threads can read left/right values.
__global__ void kernel0_7(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
  const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
  const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
  const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
  const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1;
  const AN5D_TYPE __side0Len = 7;
  const AN5D_TYPE __side1Len = 256;
  const AN5D_TYPE __side2Len = 498;
  const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
  const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
  const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
  const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
  const AN5D_TYPE __blockSize = 1 * __side2LenOl;
  const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
  const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
  const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
  const AN5D_TYPE __local_c2 = __tid;
  const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
  const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
  float __reg_0_0; float __reg_0_1; float __reg_0_2;
  float __reg_1_0; float __reg_1_1; float __reg_1_2;
  float __reg_2_0; float __reg_2_1; float __reg_2_2;
  float __reg_3_0; float __reg_3_1; float __reg_3_2;
  float __reg_4_0; float __reg_4_1; float __reg_4_2;
  float __reg_5_0; float __reg_5_1; float __reg_5_2;
  float __reg_6_0; float __reg_6_1; float __reg_6_2;
  __shared__ float __b_sb_double[__blockSize * 2];
  float *__b_sb = __b_sb_double;
  const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
  const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
  const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
  const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
  const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
  const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
  const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
  const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
  const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
  const AN5D_TYPE __storeValid = __writeValid7;
  AN5D_TYPE __c1;
  AN5D_TYPE __h;
  const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
  if (__c1Id == 0)
  {
    // Top tile: row 0 is the global boundary row, held in __reg_6_0 and fed
    // into every stage during warm-up.
    __LOAD(__reg_6_0, 0);
    __LOAD(__reg_0_1, 1);
    __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_6_0, __reg_0_1, __reg_0_2);
    __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_6_0, __reg_1_1, __reg_1_2);
    __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_6_0, __reg_2_1, __reg_2_2);
    __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_6_0, __reg_3_1, __reg_3_2);
    __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_6_0, __reg_4_1, __reg_4_2);
    __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_6_0, __reg_5_1, __reg_5_2);
    __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(1, __reg_6_0, __reg_6_1, __reg_6_2);
    __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2,
__reg_5_0, __reg_5_1); __STORE(2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(3, __reg_6_2, __reg_6_0, __reg_6_1); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(5, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(6, __reg_6_2, __reg_6_0, __reg_6_1); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, 
__reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 15; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, 
__reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); 
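      // Tail of the two-extra-row drain: the remaining stages retire rows
      // __h - 3 through __h + 0.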
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1);
    __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2);
    __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_0);
    __STORE(__h + 0, __reg_6_2, __reg_6_0, __reg_0_1);
    }
  }
  else
  {
    // Interior tiles: steady-state loop, three steps of the seven-stage
    // pipeline per iteration (one per register-rotation phase), then a
    // guarded three-step tail.
    for (__h = 17; __h <= __side1LenOl - 3;)
    {
      __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++;
      __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++;
      __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++;
    }
    if (__h == __side1LenOl) return;
    __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++;
    if (__h == __side1LenOl) return;
    __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++;
    if (__h == __side1LenOl) return;
    __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++;
  }
}

// kernel0_6: six fused time steps per sweep (__side0Len = 6); one fewer halo
// layer per side than kernel0_7, so the useful tile widens to __side2Len = 500.
__global__ void kernel0_6(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
  const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
  const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
  const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
  const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1;
  const AN5D_TYPE __side0Len = 6;
  const AN5D_TYPE __side1Len = 256;
  const AN5D_TYPE __side2Len = 500;
  const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
  const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
  const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
  const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
  const AN5D_TYPE __blockSize = 1 * __side2LenOl;
  const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
  const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
  const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
  const AN5D_TYPE __local_c2 = __tid;
  const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
  const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
  float __reg_0_0; float __reg_0_1; float __reg_0_2;
  float __reg_1_0; float __reg_1_1; float __reg_1_2;
  float __reg_2_0; float __reg_2_1; float __reg_2_2;
  float __reg_3_0; float __reg_3_1; float __reg_3_2;
  float __reg_4_0; float __reg_4_1; float __reg_4_2;
  float __reg_5_0; float __reg_5_1; float __reg_5_2;
  __shared__ float __b_sb_double[__blockSize * 2];
  float *__b_sb = __b_sb_double;
  const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
  const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
  const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
  const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
  const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
  const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
  const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
  const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
  const AN5D_TYPE __storeValid = __writeValid6;
  AN5D_TYPE __c1;
  AN5D_TYPE __h;
  const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
  if (__c1Id == 0)
  {
    // Top tile: row 0 is the boundary row, held in __reg_5_0.
    __LOAD(__reg_5_0, 0);
    __LOAD(__reg_0_1, 1);
    __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_5_0, __reg_0_1, __reg_0_2);
    __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_5_0, __reg_1_1, __reg_1_2);
    __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_5_0, __reg_2_1, __reg_2_2);
    __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_5_0, __reg_3_1, __reg_3_2);
    __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_5_0, __reg_4_1, __reg_4_2);
    __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(1, __reg_5_0, __reg_5_1, __reg_5_2);
    __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(2, __reg_5_1, __reg_5_2, __reg_5_0);
    __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(3, __reg_5_2, __reg_5_0, __reg_5_1);
    __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(4, __reg_5_0, __reg_5_1, __reg_5_2);
    __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1,
__reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(5, __reg_5_1, __reg_5_2, __reg_5_0); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(6, __reg_5_2, __reg_5_0, __reg_5_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(6, __reg_5_2, __reg_5_0, __reg_5_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, 
__reg_5_2, __reg_5_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, 
__reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2);
    __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0);
    __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_5_1);
    __STORE(__h + 0, __reg_5_0, __reg_5_1, __reg_0_2);
    }
  }
  else
  {
    // Interior tiles: steady-state loop, three steps of the six-stage
    // pipeline per iteration, then a guarded three-step tail.
    for (__h = 13; __h <= __side1LenOl - 3;)
    {
      __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++;
      __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++;
      __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++;
    }
    if (__h == __side1LenOl) return;
    __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++;
    if (__h == __side1LenOl) return;
    __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++;
    if (__h == __side1LenOl) return;
    __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++;
  }
}

// kernel0_5: five fused time steps per sweep (__side0Len = 5); one fewer halo
// layer per side than kernel0_6, so the useful tile widens to __side2Len = 502.
__global__ void kernel0_5(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
  const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
  const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
  const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
  const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1;
  const AN5D_TYPE __side0Len = 5;
  const AN5D_TYPE __side1Len = 256;
  const AN5D_TYPE __side2Len = 502;
  const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
  const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
  const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
  const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
  const AN5D_TYPE __blockSize = 1 * __side2LenOl;
  const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
  const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
  const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
  const AN5D_TYPE __local_c2 = __tid;
  const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
  const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
  float __reg_0_0; float __reg_0_1; float __reg_0_2;
  float __reg_1_0; float __reg_1_1; float __reg_1_2;
  float __reg_2_0; float __reg_2_1; float __reg_2_2;
  float __reg_3_0; float __reg_3_1; float __reg_3_2;
  float __reg_4_0; float __reg_4_1; float __reg_4_2;
  __shared__ float __b_sb_double[__blockSize * 2];
  float *__b_sb = __b_sb_double;
  const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
  const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
  const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
  const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
  const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
  const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
  const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
  const AN5D_TYPE __storeValid = __writeValid5;
  AN5D_TYPE __c1;
  AN5D_TYPE __h;
  const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
  if (__c1Id == 0)
  {
    // Top tile: row 0 is the boundary row, held in __reg_4_0.
    __LOAD(__reg_4_0, 0);
    __LOAD(__reg_0_1, 1);
    __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_4_0, __reg_0_1, __reg_0_2);
    __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_4_0, __reg_1_1, __reg_1_2);
    __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_4_0, __reg_2_1, __reg_2_2);
    __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_4_0, __reg_3_1, __reg_3_2);
    __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(1, __reg_4_0, __reg_4_1, __reg_4_2);
    __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(2, __reg_4_1, __reg_4_2, __reg_4_0);
    __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(3, __reg_4_2, __reg_4_0, __reg_4_1);
    __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(4, __reg_4_0, __reg_4_1, __reg_4_2);
    __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(5, __reg_4_1, __reg_4_2, __reg_4_0);
  }
  else
  {
    __LOAD(__reg_0_0, 0);
    __LOAD(__reg_0_1, 1);
    __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
    __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
    __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
    __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
    __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
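    // Interior-tile warm-up continues through row 10; the first store of this
    // sweep happens at h = 5 (see __STORE(5, ...) just below).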
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(5, __reg_4_1, __reg_4_2, __reg_4_0); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 11; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 1, 
__reg_4_0, __reg_4_1, __reg_0_2);
    }
    else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
    {
    // Two extra input rows: load both, then drain the pipeline down to row __h + 0.
    __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
    __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2);
    __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0);
    __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1);
    __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_4_2);
    __STORE(__h + 0, __reg_4_1, __reg_4_2, __reg_0_0);
    }
  }
  else
  {
    // Interior tiles: steady-state loop, three steps of the five-stage
    // pipeline per iteration, then a guarded three-step tail.
    for (__h = 11; __h <= __side1LenOl - 3;)
    {
      __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++;
      __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++;
      __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++;
      __DB_SWITCH(); __syncthreads();
    }
    if (__h == __side1LenOl) return;
    __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++;
    if (__h == __side1LenOl) return;
    __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++;
    if (__h == __side1LenOl) return;
    __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++;
  }
}

// kernel0_4: four fused time steps per sweep (__side0Len = 4); useful tile
// width grows to __side2Len = 504.
__global__ void kernel0_4(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
  const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
  const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
  const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
  const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1;
  const AN5D_TYPE __side0Len = 4;
  const AN5D_TYPE __side1Len = 256;
  const AN5D_TYPE __side2Len = 504;
  const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
  const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
  const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
  const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
  const AN5D_TYPE __blockSize = 1 * __side2LenOl;
  const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
  const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
  const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
  const AN5D_TYPE __local_c2 = __tid;
  const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
  const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
  float __reg_0_0; float __reg_0_1; float __reg_0_2;
  float __reg_1_0; float __reg_1_1; float __reg_1_2;
  float __reg_2_0; float __reg_2_1; float __reg_2_2;
  float __reg_3_0; float __reg_3_1; float __reg_3_2;
  __shared__ float __b_sb_double[__blockSize * 2];
  float *__b_sb = __b_sb_double;
  const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
  const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
  const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
  const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
  const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
  const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
  const AN5D_TYPE __storeValid = __writeValid4;
  AN5D_TYPE __c1;
  AN5D_TYPE __h;
  const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
  if (__c1Id == 0)
  {
    // Top tile: row 0 is the boundary row, held in __reg_3_0.
    __LOAD(__reg_3_0, 0);
    __LOAD(__reg_0_1, 1);
    __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_3_0, __reg_0_1, __reg_0_2);
    __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_3_0, __reg_1_1, __reg_1_2);
    __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_3_0, __reg_2_1, __reg_2_2);
    __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(1, __reg_3_0, __reg_3_1, __reg_3_2);
    __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(2, __reg_3_1, __reg_3_2, __reg_3_0);
    __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(3, __reg_3_2, __reg_3_0, __reg_3_1);
    __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(4, __reg_3_0, __reg_3_1, __reg_3_2);
  }
  else
  {
    __LOAD(__reg_0_0, 0);
    __LOAD(__reg_0_1, 1);
    __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
    __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
    __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
    __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
    __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
    __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
    __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(4, __reg_3_0, __reg_3_1, __reg_3_2);
  }
  __b_sb = __b_sb_double + __blockSize * 0;
  if (__c1Id == __side1Num - 1)
  {
    for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
    {
      __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2,
__reg_3_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h + 0, __reg_3_2, __reg_3_0, __reg_0_1); } } else { for (__h = 9; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, 
__reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } } __global__ void kernel0_3(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __storeValid = __writeValid3; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_2_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_2_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_2_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(2, __reg_2_1, __reg_2_2, __reg_2_0); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_2, __reg_2_0, __reg_2_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_2, __reg_2_0, __reg_2_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); 
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h + 0, __reg_2_0, __reg_2_1, __reg_0_2); } } else { for (__h = 7; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; } } __global__ void kernel0_2(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = 
__updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __storeValid = __writeValid2; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_1_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_1_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(1, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_1, __reg_1_2, __reg_1_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_1, __reg_1_2, __reg_1_0); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 1, __reg_1_0, __reg_1_1, 
__reg_1_2); __STORE(__h + 0, __reg_1_1, __reg_1_2, __reg_0_0); } } else { for (__h = 5; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } } __global__ void kernel0_1(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __storeValid = __writeValid1; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * 
((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __STORE(1, __reg_0_0, __reg_0_1, __reg_0_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __STORE(1, __reg_0_0, __reg_0_1, __reg_0_2); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h + 0, __reg_0_2, __reg_0_0, __reg_0_1); } } else { for (__h = 3; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; } }
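The kernel0_N variants above all apply the same four-neighbor gradient update and differ only in their temporal-blocking degree __side0Len: N time steps are fused per sweep while rows stream through the rotating __reg_* registers and the double-buffered __b_sb shared array. As a reference point only (the kernel name, launch shape, and separate in/out buffers below are illustrative simplifications, not taken from the generated files, which instead ping-pong within one array via the (c0 % 2) indexing), a naive single-step version of the same update is:

/* One time step of the gradient2d stencil: out = b + 1/sqrt(eps + sum of
 * squared differences with the four neighbors), matching __CALCEXPR above. */
__global__ void gradient2d_step(const float *in, float *out, int dimsize)
{
  int c1 = blockIdx.y * blockDim.y + threadIdx.y; // row
  int c2 = blockIdx.x * blockDim.x + threadIdx.x; // column
  if (c1 < 1 || c1 >= dimsize - 1 || c2 < 1 || c2 >= dimsize - 1)
    return; // update interior points only, as in __updateValid/__storeValid
  float b  = in[c1 * dimsize + c2];
  float up = in[(c1 - 1) * dimsize + c2];
  float dn = in[(c1 + 1) * dimsize + c2];
  float lf = in[c1 * dimsize + (c2 - 1)];
  float rt = in[c1 * dimsize + (c2 + 1)];
  out[c1 * dimsize + c2] = b + 1.0f / sqrt(0.0001f
      + (b - up) * (b - up) + (b - dn) * (b - dn)
      + (b - rt) * (b - rt) + (b - lf) * (b - lf));
}
/* Example launch (illustrative):
 *   dim3 block(16, 16);
 *   dim3 grid((dimsize + 15) / 16, (dimsize + 15) / 16);
 *   gradient2d_step<<<grid, block>>>(in, out, dimsize);
 * The generated kernels above amortize N such passes per global-memory
 * round trip, at the cost of one halo layer of redundant work per fused
 * step on each tile side. */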
1ec0a57b6fef52a52eecfac130a36f204dbab19b.cu
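/*
 * Header note (editorial, not part of the generated file): this is the CUDA
 * source paired with the HIP port above. kernel0_16 below is the deepest
 * temporally blocked variant of the same gradient2d stencil, fusing 16 time
 * steps per sweep (__side0Len = 16, __side1Len = 256, __side2Len = 480 =
 * 512 - 2 * 16); each additional fused step trims one halo layer from the
 * useful tile in exchange for fewer global-memory round trips.
 */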
#include "gradient2d-512-16-256_kernel.hu" __device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; } __global__ void kernel0_16(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 16; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 480; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_8_0; float __reg_8_1; float __reg_8_2; float __reg_9_0; float __reg_9_1; float __reg_9_2; float __reg_10_0; float __reg_10_1; float __reg_10_2; float __reg_11_0; float __reg_11_1; float __reg_11_2; float __reg_12_0; float __reg_12_1; float __reg_12_2; float __reg_13_0; float __reg_13_1; float __reg_13_2; float __reg_14_0; float __reg_14_1; float __reg_14_2; float __reg_15_0; float __reg_15_1; float __reg_15_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE 
__writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11); const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12); const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13); const AN5D_TYPE __writeValid14 = __updateValid && __local_c2 >= (__halo2 * 14) && __local_c2 < __side2LenOl - (__halo2 * 14); const AN5D_TYPE __writeValid15 = __updateValid && __local_c2 >= (__halo2 * 15) && __local_c2 < __side2LenOl - (__halo2 * 15); const AN5D_TYPE __writeValid16 = __updateValid && __local_c2 >= (__halo2 * 16) && __local_c2 < __side2LenOl - (__halo2 * 16); const AN5D_TYPE __storeValid = __writeValid16; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC12(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid12) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC13(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid13) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC14(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid14) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC15(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid15) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_15_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_15_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_15_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_15_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_15_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); 
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_15_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_15_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_15_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_15_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_15_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_15_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_15_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, 
__reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_15_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_15_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_15_0, __reg_13_1, __reg_13_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_15_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(1, __reg_15_0, __reg_15_1, __reg_15_2); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, 
__reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(2, __reg_15_1, __reg_15_2, __reg_15_0); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(3, __reg_15_2, __reg_15_0, __reg_15_1); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(4, __reg_15_0, __reg_15_1, __reg_15_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(5, __reg_15_1, __reg_15_2, __reg_15_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); 
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(6, __reg_15_2, __reg_15_0, __reg_15_1); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(7, __reg_15_0, __reg_15_1, __reg_15_2); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(8, __reg_15_1, __reg_15_2, __reg_15_0); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(9, __reg_15_2, __reg_15_0, __reg_15_1); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, 
__reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(10, __reg_15_0, __reg_15_1, __reg_15_2); __LOAD(__reg_0_0, 27); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(11, __reg_15_1, __reg_15_2, __reg_15_0); __LOAD(__reg_0_1, 28); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(12, __reg_15_2, __reg_15_0, __reg_15_1); __LOAD(__reg_0_2, 29); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(13, __reg_15_0, __reg_15_1, __reg_15_2); __LOAD(__reg_0_0, 30); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); 
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(14, __reg_15_1, __reg_15_2, __reg_15_0); __LOAD(__reg_0_1, 31); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(15, __reg_15_2, __reg_15_0, __reg_15_1); __LOAD(__reg_0_2, 32); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(16, __reg_15_0, __reg_15_1, __reg_15_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); 
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, 
__reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, 
__reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 27); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 28); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __LOAD(__reg_0_2, 29); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); 
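/* The remaining warm-up rows (through 32) prime the deepest pipeline stages; this branch's only warm-up store writes row 16, after which the steady-state loop takes over at __h = 33. */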
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __LOAD(__reg_0_0, 30); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __LOAD(__reg_0_1, 31); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, 32); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(16, __reg_15_0, __reg_15_1, __reg_15_2); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, 
__reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 16, __reg_15_2, __reg_15_0, __reg_15_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 16, __reg_15_0, __reg_15_1, __reg_15_2); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, 
__reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 15, __reg_15_2, __reg_15_0, __reg_15_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 14, __reg_15_0, __reg_15_1, __reg_15_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 13, __reg_15_1, __reg_15_2, __reg_15_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 12, __reg_15_2, __reg_15_0, __reg_15_1); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 11, __reg_15_0, __reg_15_1, __reg_15_2); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 10, __reg_15_1, __reg_15_2, __reg_15_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); 
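/* Bottom-boundary drain (__h + 0 case): with no further rows to load, the last row read (__reg_0_2) is re-fed as the lower neighbor of each remaining stage, flushing one stored row per step down to row __h - 2. */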
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 9, __reg_15_2, __reg_15_0, __reg_15_1); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 8, __reg_15_0, __reg_15_1, __reg_15_2); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 7, __reg_15_1, __reg_15_2, __reg_15_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_0_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 6, __reg_15_2, __reg_15_0, __reg_15_1); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_0_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 5, __reg_15_0, __reg_15_1, __reg_15_2); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_0_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 4, __reg_15_1, __reg_15_2, __reg_15_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_0_2); __STORE(__h - 3, __reg_15_2, __reg_15_0, __reg_15_1); __STORE(__h - 2, __reg_15_0, __reg_15_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, 
__reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 15, __reg_15_2, __reg_15_0, __reg_15_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 14, __reg_15_0, __reg_15_1, __reg_15_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 13, __reg_15_1, __reg_15_2, __reg_15_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 12, __reg_15_2, __reg_15_0, __reg_15_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 11, __reg_15_0, __reg_15_1, __reg_15_2); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 10, __reg_15_1, __reg_15_2, __reg_15_0); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, 
__reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 9, __reg_15_2, __reg_15_0, __reg_15_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 8, __reg_15_0, __reg_15_1, __reg_15_2); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 7, __reg_15_1, __reg_15_2, __reg_15_0); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 6, __reg_15_2, __reg_15_0, __reg_15_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_0_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 5, __reg_15_0, __reg_15_1, __reg_15_2); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_0_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 4, __reg_15_1, __reg_15_2, __reg_15_0); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_0_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 3, __reg_15_2, __reg_15_0, __reg_15_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_0_0); __STORE(__h - 2, __reg_15_0, __reg_15_1, __reg_15_2); __STORE(__h - 1, __reg_15_1, __reg_15_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, 
__reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 15, __reg_15_2, __reg_15_0, __reg_15_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 14, __reg_15_0, __reg_15_1, __reg_15_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 13, __reg_15_1, __reg_15_2, __reg_15_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 12, __reg_15_2, __reg_15_0, __reg_15_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, 
__reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 11, __reg_15_0, __reg_15_1, __reg_15_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 10, __reg_15_1, __reg_15_2, __reg_15_0); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 9, __reg_15_2, __reg_15_0, __reg_15_1); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 8, __reg_15_0, __reg_15_1, __reg_15_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 7, __reg_15_1, __reg_15_2, __reg_15_0); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 6, __reg_15_2, __reg_15_0, __reg_15_1); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 5, __reg_15_0, __reg_15_1, __reg_15_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_0_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 4, __reg_15_1, __reg_15_2, __reg_15_0); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_0_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 3, 
__reg_15_2, __reg_15_0, __reg_15_1); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_0_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 2, __reg_15_0, __reg_15_1, __reg_15_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_0_1); __STORE(__h - 1, __reg_15_1, __reg_15_2, __reg_15_0); __STORE(__h + 0, __reg_15_2, __reg_15_0, __reg_0_1); } } else { for (__h = 33; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 16, __reg_15_2, __reg_15_0, __reg_15_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 16, __reg_15_0, __reg_15_1, __reg_15_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, 
__reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 16, __reg_15_2, __reg_15_0, __reg_15_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 16, __reg_15_0, __reg_15_1, __reg_15_2); __h++; } }

__global__ void kernel0_15(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 15; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 482; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_8_0; float __reg_8_1; float __reg_8_2; float __reg_9_0; float __reg_9_1; float __reg_9_2; float __reg_10_0; float __reg_10_1; float __reg_10_2; float __reg_11_0; float __reg_11_1; float __reg_11_2; float __reg_12_0; float __reg_12_1; float __reg_12_2; float __reg_13_0; float __reg_13_1; float __reg_13_2; float __reg_14_0; float __reg_14_1; float __reg_14_2;
__shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11); const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12); const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13); const AN5D_TYPE __writeValid14 = __updateValid && __local_c2 >= (__halo2 * 14) && __local_c2 < __side2LenOl - (__halo2 * 14); const AN5D_TYPE __writeValid15 = __updateValid && __local_c2 >= (__halo2 * 15) && __local_c2 < __side2LenOl - (__halo2 * 15); const AN5D_TYPE __storeValid = __writeValid15; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC12(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid12) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC13(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid13) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC14(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid14) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0) { __LOAD(__reg_14_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_14_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_14_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_14_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
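/* __CALCEXPR is the pointwise stencil: out = mid + 1.0f / sqrt(0.0001f + (mid-up)^2 + (mid-down)^2 + (mid-left)^2 + (mid-right)^2). Vertical neighbors come from the register triple; horizontal ones from the double-buffered shared-memory row __b_sb. In the top block (__c1Id == 0), row 0 is the domain boundary: it stays in __reg_14_0 and is passed as the upper neighbor to the first application of every stage while the 15-step pipeline fills. */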
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_14_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_14_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_14_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_14_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_14_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_14_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_14_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_14_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, 
__reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_14_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_14_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_14_0, __reg_13_1, __reg_13_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(1, __reg_14_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(2, __reg_14_1, __reg_14_2, __reg_14_0); 
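/* Steady warm-up rhythm from here on: each __LOAD of row h feeds one __CALCk per fused time step and a __STORE of row h - 15. */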
__LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(3, __reg_14_2, __reg_14_0, __reg_14_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(4, __reg_14_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(5, __reg_14_1, __reg_14_2, __reg_14_0); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(6, __reg_14_2, __reg_14_0, __reg_14_1); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, 
__reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(7, __reg_14_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(8, __reg_14_1, __reg_14_2, __reg_14_0); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(9, __reg_14_2, __reg_14_0, __reg_14_1); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(10, __reg_14_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, 
__reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(11, __reg_14_1, __reg_14_2, __reg_14_0); __LOAD(__reg_0_0, 27); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(12, __reg_14_2, __reg_14_0, __reg_14_1); __LOAD(__reg_0_1, 28); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(13, __reg_14_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, 29); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(14, __reg_14_1, __reg_14_2, __reg_14_0); __LOAD(__reg_0_0, 30); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, 
__reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(15, __reg_14_2, __reg_14_0, __reg_14_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 
16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); 
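// Interior tiles (else branch), warm-up: the same pipeline is primed without
// boundary substitution; stage __CALCk first fires when plane 2k arrives, and
// nothing is written until all stages are live (the first write is
// __STORE(15, ...) at the end of this warm-up).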
__CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 27); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 28); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); 
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __LOAD(__reg_0_2, 29); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __LOAD(__reg_0_0, 30); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(15, __reg_14_2, __reg_14_0, __reg_14_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 31; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); 
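// Steady state for the last tile in c1: the loop body is unrolled three times
// so each register phase (_0/_1/_2) occurs once per trip; a plane loaded at
// __h is stored at __h - 15. Each __CALC stages its middle plane through the
// double-buffered shared-memory row (__CALCSETUP / __DB_SWITCH) so the
// lateral __SBREF neighbors are read consistently.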
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 15, __reg_14_1, __reg_14_2, __reg_14_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 15, __reg_14_2, __reg_14_0, __reg_14_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 14, __reg_14_1, __reg_14_2, __reg_14_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); 
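// Bottom-boundary drain: the if (0) {} else-if chain dispatches on how many
// planes remain before the tile end (__h + 0 / + 1 / + 2). No further planes
// are loaded; the last plane read is re-fed as the lower halo while the
// pipeline flushes, retiring one stage and one __STORE per statement group.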
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 13, __reg_14_2, __reg_14_0, __reg_14_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 12, __reg_14_0, __reg_14_1, __reg_14_2); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 11, __reg_14_1, __reg_14_2, __reg_14_0); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 10, __reg_14_2, __reg_14_0, __reg_14_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 9, __reg_14_0, __reg_14_1, __reg_14_2); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 8, __reg_14_1, __reg_14_2, __reg_14_0); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 7, __reg_14_2, __reg_14_0, __reg_14_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 6, __reg_14_0, __reg_14_1, __reg_14_2); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_0_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 5, 
__reg_14_1, __reg_14_2, __reg_14_0); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_0_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 4, __reg_14_2, __reg_14_0, __reg_14_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_0_0); __STORE(__h - 3, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 2, __reg_14_1, __reg_14_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 14, __reg_14_1, __reg_14_2, __reg_14_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 13, __reg_14_2, __reg_14_0, __reg_14_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 12, __reg_14_0, __reg_14_1, __reg_14_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, 
__reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 11, __reg_14_1, __reg_14_2, __reg_14_0); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 10, __reg_14_2, __reg_14_0, __reg_14_1); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 9, __reg_14_0, __reg_14_1, __reg_14_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 8, __reg_14_1, __reg_14_2, __reg_14_0); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 7, __reg_14_2, __reg_14_0, __reg_14_1); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 6, __reg_14_0, __reg_14_1, __reg_14_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 5, __reg_14_1, __reg_14_2, __reg_14_0); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_0_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 4, __reg_14_2, __reg_14_0, __reg_14_1); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_0_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 3, __reg_14_0, __reg_14_1, __reg_14_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_0_1); __STORE(__h - 2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 1, __reg_14_2, __reg_14_0, 
__reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 14, __reg_14_1, __reg_14_2, __reg_14_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 13, __reg_14_2, __reg_14_0, __reg_14_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 12, __reg_14_0, __reg_14_1, __reg_14_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, 
__reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 11, __reg_14_1, __reg_14_2, __reg_14_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 10, __reg_14_2, __reg_14_0, __reg_14_1); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 9, __reg_14_0, __reg_14_1, __reg_14_2); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 8, __reg_14_1, __reg_14_2, __reg_14_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 7, __reg_14_2, __reg_14_0, __reg_14_1); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 6, __reg_14_0, __reg_14_1, __reg_14_2); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 5, __reg_14_1, __reg_14_2, __reg_14_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 4, __reg_14_2, __reg_14_0, __reg_14_1); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_0_2); 
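// (__h + 2 drain, continued) the flush keeps consuming __reg_0_2 as the lower
// halo until __STORE(__h + 0, ...) writes the final plane of the tile.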
__CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 3, __reg_14_0, __reg_14_1, __reg_14_2); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_0_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 2, __reg_14_1, __reg_14_2, __reg_14_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_0_2); __STORE(__h - 1, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h + 0, __reg_14_0, __reg_14_1, __reg_0_2); } } else { for (__h = 31; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 15, __reg_14_1, __reg_14_2, __reg_14_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 15, __reg_14_2, __reg_14_0, __reg_14_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, 
__reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 15, __reg_14_1, __reg_14_2, __reg_14_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 15, __reg_14_2, __reg_14_0, __reg_14_1); __h++; } } __global__ void kernel0_14(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 14; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 484; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float 
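// kernel0_14, begun above, is the 14-step remainder variant of the same
// stencil: __side0Len = 14 fused timesteps per launch, a 256 x 484 tile
// (__side1Len x __side2Len) with halo 1, __side2LenOl = 484 + 2*14 = 512
// threads per block, reading the (c0 % 2) half of A and writing the
// ((c0 + 1) % 2) half. Below, __reg_13_0 holds plane 0 and serves as the
// upper halo for each stage's first row, mirroring __reg_14_0 above.
//
// A minimal host-side launch sketch, kept entirely in this comment so the
// generated file still compiles. It is inferred from the index math above
// (grid.x = __side1Num * __side2Num; one parity flip per launch), not taken
// from the generated driver; `dev_A` and `steps` are illustrative names:
//
//   dim3 block(512, 1, 1);                          // == __side2LenOl
//   unsigned side1Num = (dimsize - 2 + 256 - 1) / 256;
//   unsigned side2Num = (dimsize - 2 + 484 - 1) / 484;
//   dim3 grid(side1Num * side2Num, 1, 1);
//   for (int c0 = 0; c0 < steps / 14; ++c0)         // each launch advances 14 sweeps
//     kernel0_14<<<grid, block>>>(dev_A, dimsize, steps, c0);
//
// Any remainder of steps not divisible by 14 would presumably be handled by
// the other kernel0_* variants of different fusion degree, such as the
// 15-step kernel above.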
__reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_8_0; float __reg_8_1; float __reg_8_2; float __reg_9_0; float __reg_9_1; float __reg_9_2; float __reg_10_0; float __reg_10_1; float __reg_10_2; float __reg_11_0; float __reg_11_1; float __reg_11_2; float __reg_12_0; float __reg_12_1; float __reg_12_2; float __reg_13_0; float __reg_13_1; float __reg_13_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11); const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12); const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13); const AN5D_TYPE __writeValid14 = __updateValid && __local_c2 >= (__halo2 * 14) && __local_c2 < __side2LenOl - (__halo2 * 14); const AN5D_TYPE __storeValid = __writeValid14; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } 
while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC12(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid12) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC13(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid13) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_13_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_13_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_13_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_13_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_13_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_13_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, 
__reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_13_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_13_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_13_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_13_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_13_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_13_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_13_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, 
__reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_13_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(1, __reg_13_0, __reg_13_1, __reg_13_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(2, __reg_13_1, __reg_13_2, __reg_13_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(3, __reg_13_2, __reg_13_0, __reg_13_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, 
__reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(4, __reg_13_0, __reg_13_1, __reg_13_2); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(5, __reg_13_1, __reg_13_2, __reg_13_0); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(6, __reg_13_2, __reg_13_0, __reg_13_1); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(7, __reg_13_0, __reg_13_1, __reg_13_2); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(8, __reg_13_1, __reg_13_2, __reg_13_0); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); 
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(9, __reg_13_2, __reg_13_0, __reg_13_1); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(10, __reg_13_0, __reg_13_1, __reg_13_2); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(11, __reg_13_1, __reg_13_2, __reg_13_0); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(12, __reg_13_2, __reg_13_0, __reg_13_1); __LOAD(__reg_0_0, 27); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); 
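// Orientation note (AN5D-style generated pipeline): rows 0..27 have been loaded at this
// point, so the register stages __CALC1..__CALC13 are nearly warm; from here on each
// __LOAD of a new row pushes one fully updated row (14 fused time steps) out via __STORE.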
__STORE(13, __reg_13_0, __reg_13_1, __reg_13_2); __LOAD(__reg_0_1, 28); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(14, __reg_13_1, __reg_13_2, __reg_13_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, 
__reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, 
__reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 27); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); 
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 28); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(14, __reg_13_1, __reg_13_2, __reg_13_0); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 14, __reg_13_0, __reg_13_1, __reg_13_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); 
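// Steady state for the last c1 block: the loop body is unrolled 3x so the three-register
// window of each stage rotates in place without register copies; every unrolled step loads
// one new row and stores the row that has completed all 14 fused time steps, hence
// __STORE(__h - 14, ...).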
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 14, __reg_13_1, __reg_13_2, __reg_13_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 13, __reg_13_0, __reg_13_1, __reg_13_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 12, __reg_13_1, __reg_13_2, __reg_13_0); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 11, __reg_13_2, __reg_13_0, __reg_13_1); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 10, __reg_13_0, __reg_13_1, __reg_13_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); 
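// Drain case (__h + 0 == remaining rows): the block's input is exhausted, so the pipeline
// is flushed one stage per __STORE, reusing the last loaded row (__reg_0_1) as the clamped
// boundary value in place of a fresh __LOAD.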
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 9, __reg_13_1, __reg_13_2, __reg_13_0); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 8, __reg_13_2, __reg_13_0, __reg_13_1); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 7, __reg_13_0, __reg_13_1, __reg_13_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 6, __reg_13_1, __reg_13_2, __reg_13_0); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 5, __reg_13_2, __reg_13_0, __reg_13_1); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_0_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 4, __reg_13_0, __reg_13_1, __reg_13_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_0_1); __STORE(__h - 3, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 2, __reg_13_2, __reg_13_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 13, __reg_13_0, __reg_13_1, __reg_13_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, 
__reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 12, __reg_13_1, __reg_13_2, __reg_13_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 11, __reg_13_2, __reg_13_0, __reg_13_1); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 10, __reg_13_0, __reg_13_1, __reg_13_2); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 9, __reg_13_1, __reg_13_2, __reg_13_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 8, __reg_13_2, __reg_13_0, __reg_13_1); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 7, __reg_13_0, __reg_13_1, __reg_13_2); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 6, __reg_13_1, __reg_13_2, __reg_13_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, 
__reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 5, __reg_13_2, __reg_13_0, __reg_13_1); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 4, __reg_13_0, __reg_13_1, __reg_13_2); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_0_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 3, __reg_13_1, __reg_13_2, __reg_13_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_0_2); __STORE(__h - 2, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 1, __reg_13_0, __reg_13_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 13, __reg_13_0, __reg_13_1, __reg_13_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 12, __reg_13_1, __reg_13_2, __reg_13_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, 
__reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 11, __reg_13_2, __reg_13_0, __reg_13_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 10, __reg_13_0, __reg_13_1, __reg_13_2); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 9, __reg_13_1, __reg_13_2, __reg_13_0); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 8, __reg_13_2, __reg_13_0, __reg_13_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 7, __reg_13_0, __reg_13_1, __reg_13_2); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 6, __reg_13_1, __reg_13_2, __reg_13_0); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 5, __reg_13_2, __reg_13_0, __reg_13_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 4, __reg_13_0, __reg_13_1, __reg_13_2); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 3, __reg_13_1, __reg_13_2, __reg_13_0); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_0_0); __CALC13(__reg_13_1, 
__reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 2, __reg_13_2, __reg_13_0, __reg_13_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_0_0); __STORE(__h - 1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h + 0, __reg_13_1, __reg_13_2, __reg_0_0); } } else { for (__h = 29; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 14, __reg_13_0, __reg_13_1, __reg_13_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 14, __reg_13_1, __reg_13_2, __reg_13_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1); __h++; if (__h == __side1LenOl) return; 
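// Tail of the interior-block sweep: the steady loop advances 3 rows per trip, so up to two
// more guarded single steps follow here. No pipeline drain is needed for interior blocks,
// because adjacent c1 tiles overlap by the __OlLen1-row halo and recompute the seam.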
__LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 14, __reg_13_0, __reg_13_1, __reg_13_2); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 14, __reg_13_1, __reg_13_2, __reg_13_0); __h++; } }
__global__ void kernel0_13(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 13; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 486; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_8_0; float __reg_8_1; float __reg_8_2; float __reg_9_0; float __reg_9_1; float __reg_9_2; float __reg_10_0; float __reg_10_1; float __reg_10_2; float __reg_11_0; float __reg_11_1; float __reg_11_2; float __reg_12_0; float __reg_12_1; float __reg_12_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11); const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12); const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13); const AN5D_TYPE __storeValid = __writeValid13; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC12(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid12) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0) { __LOAD(__reg_12_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_12_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_12_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_12_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_12_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_12_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_12_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8);
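// kernel0_13 warm-up in progress: same register-pipelined scheme as kernel0_14, one stage
// shallower -- 13 fused time steps, so stages __CALC1..__CALC12 feed the final __STORE,
// which is gated by __storeValid == __writeValid13. The helper macros are redefined with
// bodies identical to the 14-step variant.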
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_12_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_12_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_12_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_12_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_12_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_12_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); 
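/* __CALCk only computes where __writeValidk holds; threads in the per-stage
   halo fall back to "out = reg1", passing the centre value through unchanged
   so partially-updated data never leaks into the valid interior. */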
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(1, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(2, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(3, __reg_12_2, __reg_12_0, __reg_12_1); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(4, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(5, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); 
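/* __STORE(h, ...) performs the final (13th) __CALCEXPR application and writes
   the result straight into the opposite time-parity plane of A (__DEST), so
   only fully updated rows ever reach global memory. */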
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(6, __reg_12_2, __reg_12_0, __reg_12_1); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(7, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(8, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(9, __reg_12_2, __reg_12_0, __reg_12_1); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(10, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); 
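/* The _0/_1/_2 register suffixes form a rotating three-row window over
   consecutive input rows: the names rotate instead of values being copied,
   which is why this priming sequence (and the streaming loops below) is
   unrolled in groups of three. */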
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(11, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(12, __reg_12_2, __reg_12_0, __reg_12_1); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(13, __reg_12_0, __reg_12_1, __reg_12_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, 
__reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); 
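/* Interior-tile prologue (no row-0 clamp): the pipeline is primed in the same
   staggered fashion. Once row 26 is in flight the pipeline is full; the
   `for (__h = 27; ...)` loops further below stream one row per iteration and
   retire one output row 13 positions behind the load front
   (__STORE(__h - 13, ...)), with the trailing `__h + 0/1/2` cases draining
   the pipeline against the lower tile edge on the last c1-tile. */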
__LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, 
__reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(13, __reg_12_0, __reg_12_1, __reg_12_2); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 27; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 13, __reg_12_2, __reg_12_0, __reg_12_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 13, __reg_12_0, __reg_12_1, __reg_12_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, 
__reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 12, __reg_12_2, __reg_12_0, __reg_12_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 11, __reg_12_0, __reg_12_1, __reg_12_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 10, __reg_12_1, __reg_12_2, __reg_12_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 9, __reg_12_2, __reg_12_0, __reg_12_1); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 8, __reg_12_0, __reg_12_1, __reg_12_2); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 7, __reg_12_1, __reg_12_2, __reg_12_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 6, __reg_12_2, __reg_12_0, __reg_12_1); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2); __CALC11(__reg_11_0, 
__reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 5, __reg_12_0, __reg_12_1, __reg_12_2); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 4, __reg_12_1, __reg_12_2, __reg_12_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_0_2); __STORE(__h - 3, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 2, __reg_12_0, __reg_12_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 12, __reg_12_2, __reg_12_0, __reg_12_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 11, __reg_12_0, __reg_12_1, __reg_12_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 10, __reg_12_1, __reg_12_2, __reg_12_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 9, __reg_12_2, __reg_12_0, 
__reg_12_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 8, __reg_12_0, __reg_12_1, __reg_12_2); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 7, __reg_12_1, __reg_12_2, __reg_12_0); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 6, __reg_12_2, __reg_12_0, __reg_12_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 5, __reg_12_0, __reg_12_1, __reg_12_2); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 4, __reg_12_1, __reg_12_2, __reg_12_0); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 3, __reg_12_2, __reg_12_0, __reg_12_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_0_0); __STORE(__h - 2, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 1, __reg_12_1, __reg_12_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 12, __reg_12_2, __reg_12_0, __reg_12_1); __CALC2(__reg_2_0, 
__reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 11, __reg_12_0, __reg_12_1, __reg_12_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 10, __reg_12_1, __reg_12_2, __reg_12_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 9, __reg_12_2, __reg_12_0, __reg_12_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 8, __reg_12_0, __reg_12_1, __reg_12_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 7, __reg_12_1, __reg_12_2, __reg_12_0); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 6, __reg_12_2, __reg_12_0, __reg_12_1); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 5, __reg_12_0, __reg_12_1, __reg_12_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, 
__reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 4, __reg_12_1, __reg_12_2, __reg_12_0); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 3, __reg_12_2, __reg_12_0, __reg_12_1); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 2, __reg_12_0, __reg_12_1, __reg_12_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_0_1); __STORE(__h - 1, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h + 0, __reg_12_2, __reg_12_0, __reg_0_1); } } else { for (__h = 27; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 13, __reg_12_2, __reg_12_0, __reg_12_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 13, __reg_12_0, __reg_12_1, __reg_12_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, 
__reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 13, __reg_12_2, __reg_12_0, __reg_12_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 13, __reg_12_0, __reg_12_1, __reg_12_2); __h++; } } __global__ void kernel0_12(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 12; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 488; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_8_0; float __reg_8_1; float __reg_8_2; float __reg_9_0; float __reg_9_1; float __reg_9_2; float __reg_10_0; float __reg_10_1; float __reg_10_2; float __reg_11_0; float __reg_11_1; 
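/* kernel0_12: the same AN5D-style register pipeline as kernel0_13 above,
   specialised for 12 fused time steps (__side0Len = 12), so the register
   chain stops at __reg_11_* and __storeValid == __writeValid12.
   Host-side launch sketch (an assumption -- the driver code is not part of
   this file; the sizes follow the constants defined just above):
     dim3 block(512);                      // __side2LenOl = 488 + 2*12
     dim3 grid(__side1Num * __side2Num);   // one block per (c1, c2) tile
     hipLaunchKernelGGL(kernel0_12, grid, block, 0, 0,
                        A, dimsize, timestep, c0);
   blockIdx.x is decomposed into __c1Id and the c2 tile index above. */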
float __reg_11_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11); const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12); const AN5D_TYPE __storeValid = __writeValid12; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_11_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_11_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_11_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_11_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_11_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_11_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_11_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); 
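/* kernel0_12, first c1-tile prologue: identical boundary handling to
   kernel0_13, one stage shallower -- __reg_11_0 caches the clamped row 0
   and __STORE now supplies the 12th and final stencil application. */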
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_11_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_11_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_11_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_11_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_11_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(1, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); 
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(2, __reg_11_1, __reg_11_2, __reg_11_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(3, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(4, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(5, __reg_11_1, __reg_11_2, __reg_11_0); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(6, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(7, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); 
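/* Priming stores: __STORE(1) .. __STORE(12) retire the rows whose full
   12-stage update is already valid, writing row (__c1Pad2 - __halo1 + h)
   of the (c0 + 1) parity plane of A. */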
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(8, __reg_11_1, __reg_11_2, __reg_11_0); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(9, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(10, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(11, __reg_11_1, __reg_11_2, __reg_11_0); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(12, __reg_11_2, __reg_11_0, __reg_11_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, 
__reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); 
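/* Interior tiles (__c1Id != 0) prime the same pipeline but store nothing
   until it is full: the rows computed here lie in the overlap region that
   neighboring tiles recompute redundantly. */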
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); 
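/* Threads outside a stage's __writeValid guard fall back to a plain copy
   (out = reg1), so halo columns carry unmodified values into the stages
   that still consume them. */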
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(12, __reg_11_2, __reg_11_0, __reg_11_1); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 12, __reg_11_1, __reg_11_2, __reg_11_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 12, __reg_11_2, __reg_11_0, __reg_11_1); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, 
__reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 11, __reg_11_1, __reg_11_2, __reg_11_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 10, __reg_11_2, __reg_11_0, __reg_11_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 9, __reg_11_0, __reg_11_1, __reg_11_2); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 8, __reg_11_1, __reg_11_2, __reg_11_0); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 7, __reg_11_2, __reg_11_0, __reg_11_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 6, __reg_11_0, __reg_11_1, __reg_11_2); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 5, __reg_11_1, __reg_11_2, __reg_11_0); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 4, __reg_11_2, __reg_11_0, __reg_11_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0); __STORE(__h - 3, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 2, __reg_11_1, __reg_11_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, 
__reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 11, __reg_11_1, __reg_11_2, __reg_11_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 10, __reg_11_2, __reg_11_0, __reg_11_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 9, __reg_11_0, __reg_11_1, __reg_11_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 8, __reg_11_1, __reg_11_2, __reg_11_0); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 7, __reg_11_2, __reg_11_0, __reg_11_1); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 6, __reg_11_0, __reg_11_1, __reg_11_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 5, __reg_11_1, __reg_11_2, __reg_11_0); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 4, __reg_11_2, __reg_11_0, __reg_11_1); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, 
__reg_0_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 3, __reg_11_0, __reg_11_1, __reg_11_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1); __STORE(__h - 2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 1, __reg_11_2, __reg_11_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 11, __reg_11_1, __reg_11_2, __reg_11_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 10, __reg_11_2, __reg_11_0, __reg_11_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 9, __reg_11_0, __reg_11_1, __reg_11_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 8, __reg_11_1, __reg_11_2, __reg_11_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, 
__reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 7, __reg_11_2, __reg_11_0, __reg_11_1); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 6, __reg_11_0, __reg_11_1, __reg_11_2); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 5, __reg_11_1, __reg_11_2, __reg_11_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 4, __reg_11_2, __reg_11_0, __reg_11_1); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 3, __reg_11_0, __reg_11_1, __reg_11_2); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2); __STORE(__h - 1, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h + 0, __reg_11_0, __reg_11_1, __reg_0_2); } } else { for (__h = 25; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 12, __reg_11_1, __reg_11_2, __reg_11_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, 
__reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 12, __reg_11_2, __reg_11_0, __reg_11_1); __h++; }
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 12, __reg_11_1, __reg_11_2, __reg_11_0); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 12, __reg_11_2, __reg_11_0, __reg_11_1); __h++;
}
}
/* Fuses 11 time steps (__side0Len) of the 2D stencil in one sweep over a
   (c1, c2) tile; each fused step costs __halo1/__halo2 rows and columns of
   overlap with the neighboring tiles. */
__global__ void kernel0_11(float *A, int dimsize, int timestep, int c0) {
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 11;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 490;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
/* One register triple per pipeline stage: stage k holds a sliding three-row
   window of the grid after k fused time steps. */
float __reg_0_0; float __reg_0_1; float __reg_0_2;
float __reg_1_0; float __reg_1_1; float __reg_1_2;
float __reg_2_0; float __reg_2_1; float __reg_2_2;
float __reg_3_0; float __reg_3_1; float __reg_3_2;
float __reg_4_0; float __reg_4_1; float __reg_4_2;
float __reg_5_0; float __reg_5_1; float __reg_5_2;
float __reg_6_0; float __reg_6_1; float __reg_6_2;
float __reg_7_0; float __reg_7_1; float __reg_7_2;
float __reg_8_0; float __reg_8_1; float __reg_8_2;
float __reg_9_0; float __reg_9_1; float __reg_9_2;
float __reg_10_0; float __reg_10_1; float __reg_10_2;
/* Double-buffered shared-memory row: __CALCSETUP publishes each thread's
   center value so __CALCEXPR can read the left/right (c2) neighbors. */
__shared__ float __b_sb_double[__blockSize * 2];
float *__b_sb = __b_sb_double;
/* Validity guards: loads may touch the halo; stage k only writes where k
   columns of valid data remain on each side; stores require all 11. */
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
const AN5D_TYPE __storeValid = __writeValid11;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
/* A is double-buffered along c0: reads come from the (c0 % 2) half of the
   array, stores go to the ((c0 + 1) % 2) half. */
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
/* Stage k applies the stencil where __writeValidk holds and otherwise passes
   the center value through unchanged; __STORE is the final, 11th application
   and writes straight to the destination buffer. */
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0) {
__LOAD(__reg_10_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_10_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_10_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_10_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_10_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_10_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_10_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
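/* Top-boundary branch: the first loaded row stays in __reg_10_0 and is
   reused as the upper-halo argument of each stage's first __CALCn call
   (e.g. __CALC7(__reg_7_1, __reg_10_0, ...) just below). */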
__CALC7(__reg_7_1, __reg_10_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_10_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_10_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_10_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(1, __reg_10_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(2, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(3, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); 
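/* Host-side launch sketch (an assumption for illustration; the launcher is
   not part of this generated file). The __c1Id/__c2 index math implies one
   block per (c1, c2) tile pair with __side2LenOl = 512 threads per block:
     dim3 grid(__side1Num * __side2Num, 1, 1), block(512, 1, 1);
     kernel0_11<<<grid, block>>>(A, dimsize, timestep, c0);
*/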
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(4, __reg_10_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(5, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(6, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(7, __reg_10_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(8, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(9, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, 
__reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(10, __reg_10_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(11, __reg_10_1, __reg_10_2, __reg_10_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, 
__reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, 
__reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(11, __reg_10_1, __reg_10_2, __reg_10_0); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 23; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 11, __reg_10_0, __reg_10_1, __reg_10_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 11, __reg_10_1, __reg_10_2, __reg_10_0); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, 
__reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 10, __reg_10_0, __reg_10_1, __reg_10_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 9, __reg_10_1, __reg_10_2, __reg_10_0); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 8, __reg_10_2, __reg_10_0, __reg_10_1); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 7, __reg_10_0, __reg_10_1, __reg_10_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 6, __reg_10_1, __reg_10_2, __reg_10_0); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 5, __reg_10_2, __reg_10_0, __reg_10_1); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 4, __reg_10_0, __reg_10_1, __reg_10_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1); __STORE(__h - 3, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 2, __reg_10_2, __reg_10_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, 
__reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 10, __reg_10_0, __reg_10_1, __reg_10_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 9, __reg_10_1, __reg_10_2, __reg_10_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 8, __reg_10_2, __reg_10_0, __reg_10_1); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 7, __reg_10_0, __reg_10_1, __reg_10_2); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 6, __reg_10_1, __reg_10_2, __reg_10_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 5, __reg_10_2, __reg_10_0, __reg_10_1); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 4, __reg_10_0, __reg_10_1, __reg_10_2); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 3, __reg_10_1, __reg_10_2, __reg_10_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2); __STORE(__h - 2, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 1, __reg_10_0, __reg_10_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, 
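// Remainder drain (11-step variant): depending on whether 0, 1, or 2 rows are
// left past __h, the matching branch performs the final __LOADs and then
// flushes all eleven pipeline stages by re-feeding the last loaded row in
// place of the not-yet-available lower neighbor, so the block's last output
// rows are produced without further loads.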
__reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 10, __reg_10_0, __reg_10_1, __reg_10_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 9, __reg_10_1, __reg_10_2, __reg_10_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 8, __reg_10_2, __reg_10_0, __reg_10_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 7, __reg_10_0, __reg_10_1, __reg_10_2); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 6, __reg_10_1, __reg_10_2, __reg_10_0); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 5, __reg_10_2, __reg_10_0, __reg_10_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 4, __reg_10_0, __reg_10_1, __reg_10_2); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 3, __reg_10_1, __reg_10_2, __reg_10_0); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 2, __reg_10_2, __reg_10_0, __reg_10_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0); __STORE(__h - 1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h + 0, __reg_10_1, __reg_10_2, __reg_0_0); } } else { for (__h = 23; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, 
__reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 11, __reg_10_0, __reg_10_1, __reg_10_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 11, __reg_10_1, __reg_10_2, __reg_10_0); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 11, __reg_10_0, __reg_10_1, __reg_10_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 11, __reg_10_1, 
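// The surrounding statements finish the pipelined epilogue of the 11-step
// variant; kernel0_10 below is the same 5-point stencil fused over ten time
// steps (__side0Len = 10): stages __CALC1..__CALC9 plus the __CALCEXPR inside
// __STORE each advance one time level, so ten levels stay in flight in
// registers while rows stream through. The AN5D_TYPE macro and the
// __side*/__halo* naming suggest the file was emitted by the AN5D stencil
// generator.
//
// Hypothetical host-side launch sketch (an assumption, not part of the
// generated file; `A_dev`, `t`, `c0` are placeholder names). The constants in
// the kernel give blockDim.x = __side2LenOl = 492 + 2 * 10 = 512 and
// gridDim.x = __side1Num * __side2Num:
//
//   int side1Num = (dimsize - 2 + 256 - 1) / 256;
//   int side2Num = (dimsize - 2 + 492 - 1) / 492;
//   kernel0_10<<<side1Num * side2Num, 512>>>(A_dev, dimsize, t, c0);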
__reg_10_2, __reg_10_0); __h++; } }

__global__ void kernel0_10(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 10; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 492; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_8_0; float __reg_8_1; float __reg_8_2; float __reg_9_0; float __reg_9_1; float __reg_9_2;
__shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __storeValid = __writeValid10;
AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0) { __LOAD(__reg_9_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_9_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_9_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_9_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_9_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_9_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1,
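// Top boundary block (__c1Id == 0): rows 0..20 prime the pipeline. __reg_9_0
// keeps the row-0 value and is substituted for the missing upper neighbor the
// first time each stage fires (e.g. __CALC2(__reg_2_1, __reg_9_0, ...)).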
__reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_9_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_9_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_9_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_9_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(3, __reg_9_2, __reg_9_0, __reg_9_1); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, 
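// From row 11 onward every stage is live, so each newly loaded row lets
// __STORE emit one output row: __STORE(1) .. __STORE(10) write the top ten
// rows of this block's tile.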
__reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(4, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(5, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(6, __reg_9_2, __reg_9_0, __reg_9_1); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(7, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(8, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(9, __reg_9_2, __reg_9_0, __reg_9_1); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(10, __reg_9_0, __reg_9_1, __reg_9_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, 
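// Interior blocks (the else branch) run the same ramp without the row-0
// substitution; nothing is stored until all stages hold valid data, hence the
// single __STORE(10, ...) at the end of their prologue.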
__reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, 
__reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(10, __reg_9_0, __reg_9_1, __reg_9_2); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); 
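// Bottom boundary block: the main loop is unrolled three ways so the
// register-rotation suffixes (_0/_1/_2) return to the same phase each
// iteration; every trip consumes three rows and stores rows ten behind
// (__STORE(__h - 10, ...)).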
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, 
__reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, 
__reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h + 0, __reg_9_2, __reg_9_0, __reg_0_1); } } else { for (__h = 21; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, 
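// kernel0_9, which begins shortly below, repeats the scheme with nine fused
// time steps: __side0Len = 9 and __side2Len = 494, so the thread-block width
// stays 494 + 2 * 9 = 512 while the store offset shrinks to __h - 9.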
__reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2); __h++; } }

__global__ void kernel0_9(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 9; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 494; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_8_0; float __reg_8_1; float __reg_8_2;
__shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __storeValid = __writeValid9;
AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0) { __LOAD(__reg_8_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_8_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_8_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_8_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_8_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_8_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_8_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_8_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2);
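// Pipeline fill for the top boundary block: each __LOAD brings in one row and
// lets one more stage fire; once row 10 is in, the first complete column of
// results is ready and __STORE(1, ...) writes it.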
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_8_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(2, __reg_8_1, __reg_8_2, __reg_8_0); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(3, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(4, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(5, __reg_8_1, __reg_8_2, __reg_8_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(6, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, 
__reg_7_0); __STORE(7, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(8, __reg_8_1, __reg_8_2, __reg_8_0); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(9, __reg_8_2, __reg_8_0, __reg_8_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, 
__reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(9, __reg_8_2, __reg_8_0, __reg_8_1); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 19; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); 
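/* Descriptive note (interpretation of the generated code, AN5D-style temporal
   blocking assumed): this is the steady-state phase for the bottom
   (__c1Id == __side1Num - 1) blocks of the 9-step kernel.  The loop body is
   unrolled three ways so the three-register rotation (__reg_k_0/1/2) returns
   to its starting phase on every trip.  Each __CALCk applies one fused time
   step of the stencil when __writeValidk holds and otherwise forwards the
   centre value, and __STORE applies the final step via __CALCEXPR (mirroring
   the __STORE definition visible in kernel0_8 below), so nine updates are in
   flight at once; the __h - 9 offset in __STORE is exactly that pipeline
   latency in rows. */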
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, 
__reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 1, __reg_8_2, __reg_8_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, 
__reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __STORE(__h - 1, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h + 0, __reg_8_0, __reg_8_1, __reg_0_2); } } else { for (__h = 19; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, 
__reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1); __h++; } } __global__ void kernel0_8(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __storeValid = __writeValid8; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + 
c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_7_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_7_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_7_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_7_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_7_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_7_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_7_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, 
__reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_7_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(1, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(3, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(4, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(5, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(6, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(7, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, 
__reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(8, __reg_7_1, __reg_7_2, __reg_7_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); 
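/* Descriptive note on the generated warm-up: interior blocks (__c1Id != 0)
   load rows 0..16 (2 * __side0Len + 1 rows for __side0Len == 8) to prime the
   register pipeline before the first result row is committed by
   __STORE(8, ...).  The __c1Id == 0 branch above instead seeds the pipeline
   with row 0 held in __reg_7_0, which stands in for the never-updated top
   boundary row at every __CALCk stage and allows rows 1..8 to be stored
   during the warm-up itself. */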
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(8, __reg_7_1, __reg_7_2, __reg_7_0); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { 
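/* Descriptive note on the pipeline drain: the ladder of
   (__h + k == __c1Len - __side1Len * __c1Id + __halo1 * 2) cases handles the
   0-2 rows left over after the three-way unrolled loop above.  Any remaining
   rows are loaded first; the stages still in flight are then flushed with the
   last loaded register standing in for the fixed bottom boundary row, storing
   one result row per remaining stage down to the final row of the block. */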
__LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, 
__reg_7_0, __reg_7_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_0_0); } } else { for (__h = 17; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, 
__reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; } } __global__ void kernel0_7(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __storeValid = __writeValid7; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + 
(((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_6_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_6_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_6_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_6_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_6_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_6_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_6_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, 
__reg_5_0, __reg_5_1); __STORE(2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(3, __reg_6_2, __reg_6_0, __reg_6_1); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(5, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(6, __reg_6_2, __reg_6_0, __reg_6_1); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, 
__reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 15; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, 
__reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); 
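/* Descriptive note: from this point the 7-step pipeline is flushed without
   further loads.  __reg_0_1 (row __h + 1, the bottom boundary for this block)
   is reused as the lower neighbour while the remaining __CALC5/__CALC6 stages
   and the final __STOREs retire the rows still in flight. */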
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h + 0, __reg_6_2, __reg_6_0, __reg_0_1); } } else { for (__h = 15; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; } } __global__ void kernel0_6(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE 
__side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __storeValid = __writeValid6; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_5_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_5_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_5_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_5_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_5_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_5_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(2, __reg_5_1, __reg_5_2, __reg_5_0); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(3, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(4, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, 
__reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(5, __reg_5_1, __reg_5_2, __reg_5_0); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(6, __reg_5_2, __reg_5_0, __reg_5_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(6, __reg_5_2, __reg_5_0, __reg_5_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, 
__reg_5_2, __reg_5_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, 
__reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h + 0, __reg_5_0, __reg_5_1, __reg_0_2); } } else { for (__h = 13; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } } __global__ void kernel0_5(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / 
__side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __storeValid = __writeValid5; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_4_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_4_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_4_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_4_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_4_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(1, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(3, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(4, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(5, __reg_4_1, __reg_4_2, __reg_4_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); 
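/* Added note (not in the generated output): interior-tile warm-up of kernel0_5's 5-step pipeline — rows 0..10 are loaded and staged through __CALC1..__CALC4 in rotating registers; __STORE fuses the fifth update, so the first result is emitted for row 5 once the pipeline is full. */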
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(5, __reg_4_1, __reg_4_2, __reg_4_0); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 11; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 1, 
__reg_4_0, __reg_4_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h + 0, __reg_4_1, __reg_4_2, __reg_0_0); } } else { for (__h = 11; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; } } __global__ void kernel0_4(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define 
__c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __storeValid = __writeValid4; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_3_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_3_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_3_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_3_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(3, __reg_3_2, __reg_3_0, __reg_3_1); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(4, __reg_3_0, __reg_3_1, __reg_3_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(4, __reg_3_0, __reg_3_1, __reg_3_2); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, 
__reg_3_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h + 0, __reg_3_2, __reg_3_0, __reg_0_1); } } else { for (__h = 9; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, 
__reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } } __global__ void kernel0_3(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __storeValid = __writeValid3; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_2_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_2_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_2_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(2, __reg_2_1, __reg_2_2, __reg_2_0); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_2, __reg_2_0, __reg_2_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_2, __reg_2_0, __reg_2_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); 
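/* Added note (not in the generated output): two rows remain before the bottom of the domain, so kernel0_3 loads them and drains the partially filled __CALC1/__CALC2 stages before the final __STOREs. */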
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h + 0, __reg_2_0, __reg_2_1, __reg_0_2); } } else { for (__h = 7; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; } } __global__ void kernel0_2(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = 
__updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __storeValid = __writeValid2; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_1_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_1_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(1, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_1, __reg_1_2, __reg_1_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_1, __reg_1_2, __reg_1_0); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 1, __reg_1_0, __reg_1_1, 
__reg_1_2); __STORE(__h + 0, __reg_1_1, __reg_1_2, __reg_0_0); } } else { for (__h = 5; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } } __global__ void kernel0_1(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __storeValid = __writeValid1; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * 
((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __STORE(1, __reg_0_0, __reg_0_1, __reg_0_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __STORE(1, __reg_0_0, __reg_0_1, __reg_0_2); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h + 0, __reg_0_2, __reg_0_0, __reg_0_1); } } else { for (__h = 3; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; } }
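A minimal host-side launch sketch (hypothetical; the AN5D generator normally emits its own driver code) for kernel0_1 above, reconstructed from the constants hard-coded in the kernel: 256-point tiles along c1, 510-point tiles along c2, and one halo cell per side, so each block carries __side2LenOl = 510 + 2 * 1 = 512 threads while blockIdx.x enumerates (c1 tile, c2 tile) pairs.
void launch_kernel0_1(float *A, int dimsize, int timestep, int c0)
{
    const unsigned c1Len = dimsize - 2;                 /* interior length, matches __c1Len */
    const unsigned c2Len = dimsize - 2;                 /* interior length, matches __c2Len */
    const unsigned side1Num = (c1Len + 256 - 1) / 256;  /* __side1Len = 256 */
    const unsigned side2Num = (c2Len + 510 - 1) / 510;  /* __side2Len = 510 */
    dim3 grid(side1Num * side2Num);                     /* blockIdx.x encodes both tile ids */
    dim3 block(512);                                    /* __side2LenOl = __side2Len + 2 * halo */
    kernel0_1<<<grid, block>>>(A, dimsize, timestep, c0);
}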
2139ccd9adf30a422ce6ab8bec9b748752ae0fb3.hip
// !!! This is a file automatically generated by hipify!!! #include "cudnnconvgradient.h" void crossbowCudnnKernelConvGradient (void *args) { float alpha, beta; crossbowStreamP s = (crossbowStreamP) args; /* Get input buffer */ crossbowDataBufferP input = crossbowStreamGetCurrentInput (s); /* Get model variable `weights` and `bias` */ int hasBias = crossbowKernelConfigParamGetIntValue ((crossbowKernelConfigParamP) crossbowArrayListGet(s->op->kernel->parameters, 0)); /* Set model and local variables */ crossbowDataBufferP weightGradient, biasGradient = NULL; int weightGradientOffset = 0, biasGradientOffset = 0; int weightGradientLength = 0, biasGradientLength = 0; /* * The following implements the parameter server synchronisation model * * The GPU worker should wait for the application of the previously computed gradient (if any) * to the parameter server model (scheduled on a different stream) to complete. */ #ifdef UPDATE_MODEL_INCREMENTALLY checkCudaErrors(hipStreamWaitEvent(s->stream[s->op->branch], s->model->server[s->op->peer->id], 0)); #else checkCudaErrors(hipStreamWaitEvent(s->stream[s->op->branch], s->model->server, 0)); #endif weightGradient = crossbowModelVariableGradient (s->model, s->op->peer->kernel->id, 1, &weightGradientOffset, &weightGradientLength); if (hasBias) biasGradient = crossbowModelVariableGradient (s->model, s->op->peer->kernel->id, 2, &biasGradientOffset, &biasGradientLength); /* Fill gradient buffer with zeros */ #ifndef CUDART_NOOP hipMemsetAsync ((void *)(((char *) weightGradient->dev) + weightGradientOffset), 0, weightGradientLength + biasGradientLength, s->stream[s->op->branch]); #else /* Subterfuge unused parameter warnings */ UNUSED (weightGradient); UNUSED (weightGradientOffset); UNUSED (weightGradientLength); UNUSED (biasGradientLength); #endif nullPointerException(s->op->peer); /* Get input of peer operator */ crossbowDataBufferP peerInput = crossbowStreamGetPeerInput (s); /* Get cuDNN pooling parameters (from peer operator) */ crossbowCudnnConvParamsP params = s->op->peer->kernel->descriptors.conv; /* Get workspace variable */ int backwardFilterWorkSpaceSizeInBytes = 0; crossbowDataBufferP backwardFilterWorkSpace = NULL; if (params->backwardFilterWorkSpaceSize > 0) backwardFilterWorkSpace = crossbowLocalVariableGetDataBuffer ( (crossbowLocalVariableP) crossbowArrayListGet (s->op->peer->kernel->variables, 1), s->deviceId, s->id, NULL, &backwardFilterWorkSpaceSizeInBytes); /* Compute gradient with respect to bias */ if (hasBias) { alpha = 1; beta = 1; #ifndef CUDANN_NOOP checkCudnnStatus(cudnnConvolutionBackwardBias(s->cudnnHandle[s->op->branch], &alpha, params->output->descriptor, input->dev, &beta, params->biasDesc, (void *) ((char *) (biasGradient->dev) + biasGradientOffset) )); #else /* Subterfuge unused parameter warnings */ UNUSED (alpha); UNUSED (params); UNUSED (input); UNUSED (beta); UNUSED (biasGradient); UNUSED (biasGradientOffset); #endif #ifdef COMPUTE_CHECKSUM float biasGradientChecksum = crossbowDataBufferComputeCheckSum (s->model->gradient, biasGradientOffset, biasGradientLength); info("Kernel's %s bias gradient checksum is %.5f\n", s->op->kernel->name, biasGradientChecksum); #endif } /* Compute gradient with respect to weights */ alpha = 1; beta = 1; #ifndef CUDANN_NOOP checkCudnnStatus(cudnnConvolutionBackwardFilter(s->cudnnHandle[s->op->branch], &alpha, params->input->descriptor, peerInput->dev, params->output->descriptor, input->dev, params->convDesc, params->backwardFilterAlgorithm, (backwardFilterWorkSpace) ? 
backwardFilterWorkSpace->dev : NULL, backwardFilterWorkSpaceSizeInBytes, &beta, params->filterDesc, (void *) ((char *) (weightGradient->dev) + weightGradientOffset) )); #else /* Subterfuge unused parameter warnings */ UNUSED (alpha); UNUSED (params); UNUSED (peerInput); UNUSED (input); UNUSED (backwardFilterWorkSpace); UNUSED (backwardFilterWorkSpaceSizeInBytes); UNUSED (beta); UNUSED (weightGradient); UNUSED (weightGradientOffset); #endif /* Compute input data gradient */ int backwardDataWorkSpaceSizeInBytes = 0; crossbowDataBufferP backwardDataWorkSpace = NULL; if (! crossbowDataflowMostUpstream (s->dataflow, s->op->peer)) { /* Get an output variable buffer */ crossbowDataBufferP output = crossbowStreamGetCurrentOutput (s); int weightOffset = 0; #ifdef TRAIN_WITH_MASTER crossbowDataBufferP weight = crossbowModelVariable (s->theModel, s->op->peer->kernel->id, 1, &weightOffset, NULL); #else crossbowDataBufferP weight = crossbowModelVariable (s->model, s->op->peer->kernel->id, 1, &weightOffset, NULL); #endif /* Get workspace variable */ if (params->backwardDataWorkSpaceSize > 0) backwardDataWorkSpace = crossbowLocalVariableGetDataBuffer ( (crossbowLocalVariableP) crossbowArrayListGet (s->op->peer->kernel->variables, 2), s->deviceId, s->id, NULL, &backwardDataWorkSpaceSizeInBytes); alpha = 1; beta = 0; #ifndef CUDANN_NOOP checkCudnnStatus(cudnnConvolutionBackwardData(s->cudnnHandle[s->op->branch], &alpha, params->filterDesc, (void *) ((char *) (weight->dev) + weightOffset), params->output->descriptor, input->dev, params->convDesc, params->backwardDataAlgorithm, (backwardDataWorkSpace) ? backwardDataWorkSpace->dev : NULL, backwardDataWorkSpaceSizeInBytes, &beta, params->input->descriptor, output->dev )); #else /* Subterfuge unused parameter warnings */ UNUSED (alpha); UNUSED (params); UNUSED (weight); UNUSED (weightOffset); UNUSED (input); UNUSED (backwardDataWorkSpace); UNUSED (backwardDataWorkSpaceSizeInBytes); UNUSED (beta); UNUSED (output); #endif /* Store output in stream */ crossbowListAppend(s->outputs[s->op->id], output); } /* Return read-write local variables to kernel when the dataflow execution completes */ if (backwardFilterWorkSpace) crossbowListAppend (s->locals[s->op->id], backwardFilterWorkSpace); if (backwardDataWorkSpace) crossbowListAppend (s->locals[s->op->id], backwardDataWorkSpace); return; }
2139ccd9adf30a422ce6ab8bec9b748752ae0fb3.cu
#include "cudnnconvgradient.h" void crossbowCudnnKernelConvGradient (void *args) { float alpha, beta; crossbowStreamP s = (crossbowStreamP) args; /* Get input buffer */ crossbowDataBufferP input = crossbowStreamGetCurrentInput (s); /* Get model variable `weights` and `bias` */ int hasBias = crossbowKernelConfigParamGetIntValue ((crossbowKernelConfigParamP) crossbowArrayListGet(s->op->kernel->parameters, 0)); /* Set model and local variables */ crossbowDataBufferP weightGradient, biasGradient = NULL; int weightGradientOffset = 0, biasGradientOffset = 0; int weightGradientLength = 0, biasGradientLength = 0; /* * The following implements the parameter server synchronisation model * * The GPU worker should wait for the application of the previously computed gradient (if any) * to the parameter server model (scheduled on a different stream) to complete. */ #ifdef UPDATE_MODEL_INCREMENTALLY checkCudaErrors(cudaStreamWaitEvent(s->stream[s->op->branch], s->model->server[s->op->peer->id], 0)); #else checkCudaErrors(cudaStreamWaitEvent(s->stream[s->op->branch], s->model->server, 0)); #endif weightGradient = crossbowModelVariableGradient (s->model, s->op->peer->kernel->id, 1, &weightGradientOffset, &weightGradientLength); if (hasBias) biasGradient = crossbowModelVariableGradient (s->model, s->op->peer->kernel->id, 2, &biasGradientOffset, &biasGradientLength); /* Fill gradient buffer with zeros */ #ifndef CUDART_NOOP cudaMemsetAsync ((void *)(((char *) weightGradient->dev) + weightGradientOffset), 0, weightGradientLength + biasGradientLength, s->stream[s->op->branch]); #else /* Subterfuge unused parameter warnings */ UNUSED (weightGradient); UNUSED (weightGradientOffset); UNUSED (weightGradientLength); UNUSED (biasGradientLength); #endif nullPointerException(s->op->peer); /* Get input of peer operator */ crossbowDataBufferP peerInput = crossbowStreamGetPeerInput (s); /* Get cuDNN convolution parameters (from peer operator) */ crossbowCudnnConvParamsP params = s->op->peer->kernel->descriptors.conv; /* Get workspace variable */ int backwardFilterWorkSpaceSizeInBytes = 0; crossbowDataBufferP backwardFilterWorkSpace = NULL; if (params->backwardFilterWorkSpaceSize > 0) backwardFilterWorkSpace = crossbowLocalVariableGetDataBuffer ( (crossbowLocalVariableP) crossbowArrayListGet (s->op->peer->kernel->variables, 1), s->deviceId, s->id, NULL, &backwardFilterWorkSpaceSizeInBytes); /* Compute gradient with respect to bias */ if (hasBias) { alpha = 1; beta = 1; #ifndef CUDANN_NOOP checkCudnnStatus(cudnnConvolutionBackwardBias(s->cudnnHandle[s->op->branch], &alpha, params->output->descriptor, input->dev, &beta, params->biasDesc, (void *) ((char *) (biasGradient->dev) + biasGradientOffset) )); #else /* Subterfuge unused parameter warnings */ UNUSED (alpha); UNUSED (params); UNUSED (input); UNUSED (beta); UNUSED (biasGradient); UNUSED (biasGradientOffset); #endif #ifdef COMPUTE_CHECKSUM float biasGradientChecksum = crossbowDataBufferComputeCheckSum (s->model->gradient, biasGradientOffset, biasGradientLength); info("Kernel's %s bias gradient checksum is %.5f\n", s->op->kernel->name, biasGradientChecksum); #endif } /* Compute gradient with respect to weights */ alpha = 1; beta = 1; #ifndef CUDANN_NOOP checkCudnnStatus(cudnnConvolutionBackwardFilter(s->cudnnHandle[s->op->branch], &alpha, params->input->descriptor, peerInput->dev, params->output->descriptor, input->dev, params->convDesc, params->backwardFilterAlgorithm, (backwardFilterWorkSpace) ? 
backwardFilterWorkSpace->dev : NULL, backwardFilterWorkSpaceSizeInBytes, &beta, params->filterDesc, (void *) ((char *) (weightGradient->dev) + weightGradientOffset) )); #else /* Subterfuge unused parameter warnings */ UNUSED (alpha); UNUSED (params); UNUSED (peerInput); UNUSED (input); UNUSED (backwardFilterWorkSpace); UNUSED (backwardFilterWorkSpaceSizeInBytes); UNUSED (beta); UNUSED (weightGradient); UNUSED (weightGradientOffset); #endif /* Compute input data gradient */ int backwardDataWorkSpaceSizeInBytes = 0; crossbowDataBufferP backwardDataWorkSpace = NULL; if (! crossbowDataflowMostUpstream (s->dataflow, s->op->peer)) { /* Get an output variable buffer */ crossbowDataBufferP output = crossbowStreamGetCurrentOutput (s); int weightOffset = 0; #ifdef TRAIN_WITH_MASTER crossbowDataBufferP weight = crossbowModelVariable (s->theModel, s->op->peer->kernel->id, 1, &weightOffset, NULL); #else crossbowDataBufferP weight = crossbowModelVariable (s->model, s->op->peer->kernel->id, 1, &weightOffset, NULL); #endif /* Get workspace variable */ if (params->backwardDataWorkSpaceSize > 0) backwardDataWorkSpace = crossbowLocalVariableGetDataBuffer ( (crossbowLocalVariableP) crossbowArrayListGet (s->op->peer->kernel->variables, 2), s->deviceId, s->id, NULL, &backwardDataWorkSpaceSizeInBytes); alpha = 1; beta = 0; #ifndef CUDANN_NOOP checkCudnnStatus(cudnnConvolutionBackwardData(s->cudnnHandle[s->op->branch], &alpha, params->filterDesc, (void *) ((char *) (weight->dev) + weightOffset), params->output->descriptor, input->dev, params->convDesc, params->backwardDataAlgorithm, (backwardDataWorkSpace) ? backwardDataWorkSpace->dev : NULL, backwardDataWorkSpaceSizeInBytes, &beta, params->input->descriptor, output->dev )); #else /* Subterfuge unused parameter warnings */ UNUSED (alpha); UNUSED (params); UNUSED (weight); UNUSED (weightOffset); UNUSED (input); UNUSED (backwardDataWorkSpace); UNUSED (backwardDataWorkSpaceSizeInBytes); UNUSED (beta); UNUSED (output); #endif /* Store output in stream */ crossbowListAppend(s->outputs[s->op->id], output); } /* Return read-write local variables to kernel when the dataflow execution completes */ if (backwardFilterWorkSpace) crossbowListAppend (s->locals[s->op->id], backwardFilterWorkSpace); if (backwardDataWorkSpace) crossbowListAppend (s->locals[s->op->id], backwardDataWorkSpace); return; }
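The backward pass above zeroes the gradient buffer once per step and then issues every cuDNN backward call with alpha = beta = 1, so each call accumulates into the buffer (out = alpha * result + beta * out) instead of overwriting it. A minimal sketch of that zero-then-accumulate pattern, with hypothetical names grad and nbytes:
#include <cuda_runtime.h>

/* Clear the accumulator once; all-zero bytes read back as 0.0f floats,
 * so subsequent beta == 1 backward calls add onto a zeroed buffer. */
void clearGradient(float *grad, size_t nbytes, cudaStream_t stream)
{
    cudaMemsetAsync(grad, 0, nbytes, stream);
}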
d36cc9ff6fb3a31e43a2d59ba20000344165829e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <cstdlib> #include <iostream> #include <math.h> #include <time.h> #include <hipfft.h> #include <hipfftXt.h> #include <hip/hip_complex.h> #include <random> #include "fdtdrav.h" //using namespace std; using std::cout; using std::endl; int main(int argc, char ** argv) { const int numdev = atoi(argv[2]); //Measuring total time of execution clock_t tStart; //initializing the clock tStart = clock(); //Define problem main constants float eps_0 = 8.854187817e-12; // permittivity of free space float pi = 3.1415; // pi float mu_0 = (4 * pi)*1e-7; // permeability of free space float c = 1 / sqrt(mu_0*eps_0); // speed of light //Define space Parameters float dx, dy, dz, dt; // cell size x, y and z dimensions // Time step dx = 0.389e-3; dy = 0.4e-3; dz = 0.265e-3 ; float courant_factor; // courant factor courant_factor = 1; //time step size dt = (1 / (c*sqrt((1 / pow(dx, 2)) + (1 / pow(dy, 2)) + (1 / pow(dz, 2))))); dt = courant_factor * dt; // number of time steps int n_t_steps; n_t_steps = atoi(argv[1]); // pml size in each direction int pml_x_n, pml_y_n, pml_z_n, pml_x_p, pml_y_p, pml_z_p; pml_x_n = 10; pml_y_n = 10; pml_z_n = 10; pml_x_p = 10; pml_y_p = 10; pml_z_p = 10; //Air buffer in each direction int air_buff_x_n, air_buff_y_n, air_buff_z_n, air_buff_x_p, air_buff_y_p, air_buff_z_p; air_buff_x_n = 15; air_buff_y_n = 15; air_buff_z_n = 15; air_buff_x_p = 15; air_buff_y_p = 15; air_buff_z_p = 15; //OBJECTS DEFINITION // create 3d structures such as bricks and spheres const int brick_num = 1; float brick_min_x[brick_num], brick_min_y[brick_num], brick_min_z[brick_num], brick_max_x[brick_num], brick_max_y[brick_num], brick_max_z[brick_num]; float brick_sigma_e_x[brick_num], brick_sigma_e_y[brick_num], brick_sigma_e_z[brick_num]; float brick_eps_r_x[brick_num], brick_eps_r_y[brick_num], brick_eps_r_z[brick_num]; float brick_sigma_m_x[brick_num], brick_sigma_m_y[brick_num], brick_sigma_m_z[brick_num]; float brick_mu_r_x[brick_num], brick_mu_r_y[brick_num], brick_mu_r_z[brick_num]; int brick_opt; //opt == 0, do not construct object brick_opt = 1; brick_min_x[0]= 0e-3; brick_min_y[0] = 0e-3; brick_min_z[0] = 0e-3; brick_max_x[0] = 60*dx; brick_max_y[0] = 100*dy; brick_max_z[0] = 3*dz; brick_sigma_e_x[0] = 0.0004; brick_sigma_e_y[0] =0.0004; brick_sigma_e_z[0] = 0.0004; brick_eps_r_x[0] = 2.2; brick_eps_r_y[0] = 2.2; brick_eps_r_z[0] = 2.2; brick_sigma_m_x[0] = 1.2e-38; brick_sigma_m_y[0] = 1.2e-38; brick_sigma_m_z[0] = 1.2e-38; brick_mu_r_x[0] = 1; brick_mu_r_y[0] = 1; brick_mu_r_z[0] = 1; //create 2d structures (pec plates for the most part) //pecs quantity const int pec_num =3; float pec_min_x[pec_num]; float pec_min_y[pec_num]; float pec_min_z[pec_num]; float pec_max_x[pec_num]; float pec_max_y[pec_num]; float pec_max_z[pec_num]; float pec_sigma_e_x[pec_num]; float pec_sigma_e_y[pec_num]; float pec_sigma_e_z[pec_num]; int pec_opt; pec_opt = 1; //PEC 1 pec_min_x[0]= 0; pec_min_y[0] = 0; pec_min_z[0] = 0; pec_max_x[0] = 60*dx; pec_max_y[0] = 100*dy; pec_max_z[0] = 0; pec_sigma_e_x[0] = 1e10; pec_sigma_e_y[0] = 1e10; pec_sigma_e_z[0] = 1e10; //PEC 2 pec_min_x[1]= 7.535e-3; pec_min_y[1] = 0; pec_min_z[1] = 3*dz; pec_max_x[1] = 9.869e-3; pec_max_y[1] = 50*dy; pec_max_z[1] = 3*dz; pec_sigma_e_x[1] = 1e10; pec_sigma_e_y[1] = 1e10; pec_sigma_e_z[1] = 1e10; //PEC 3 pec_min_x[2]= 5.445e-3; pec_min_y[2] = 50*dy; pec_min_z[2] = 3*dz; pec_max_x[2] = 17.895e-3; 
pec_max_y[2] = (50*dy) + (16e-3); pec_max_z[2] = 3*dz; pec_sigma_e_x[2] = 1e10; pec_sigma_e_y[2] = 1e10; pec_sigma_e_z[2] = 1e10; //create source //start and end of the source //Source coordinates float source_min_x = 7.535e-3; float source_min_y = 0e-3; float source_min_z = 0e-3; float source_max_x = 9.869e-3; float source_max_y = 0e-3; float source_max_z = 3*dz; //source type : 1 gaussian pulse, 2 gaussian derivative and 3 sinusoidal int source_tp = 1; //source frequency float source_freq = 1e8; //source amplitude float source_amp = 1; //Source resistance float rs = 50; //Source direction int source_direction = 3; //Gaussian Pulse parameters float nc = 20; float tau = (nc*dy) / (2 * c); //tau = 15e-12; float t_0 = 3 * tau; //Create Resistors float resistor_min_x; float resistor_min_y; float resistor_min_z; float resistor_max_x; float resistor_max_y; float resistor_max_z; float resistor_resist; int resistor_direction; int resistor_opt; resistor_opt = 0; resistor_min_x = 7e-3; resistor_min_y = 0; resistor_min_z = 0; resistor_max_x = 8e-3; resistor_max_y = 2e-3; resistor_max_z = 4e-3; resistor_resist = 50; resistor_direction = 1; //Define Output variables //Sample voltage index float sampled_voltage_min_x, sampled_voltage_max_x, sampled_voltage_min_y, sampled_voltage_max_y, sampled_voltage_min_z, sampled_voltage_max_z; int voltage_direction; //Sampling voltage positions sampled_voltage_min_x = 7.535e-3; sampled_voltage_min_y = 10*dy; sampled_voltage_min_z = 0e-3; sampled_voltage_max_x = 9.869e-3; sampled_voltage_max_y = 10*dy; sampled_voltage_max_z = 3*dz; voltage_direction = 3; //Sampled Current Index float sampled_current_min_x, sampled_current_max_x, sampled_current_min_y, sampled_current_max_y, sampled_current_min_z, sampled_current_max_z; int current_direction; //Sampling current positions sampled_current_min_x = 7.535e-3; sampled_current_min_y = 10*dy; sampled_current_min_z = 3*dz; sampled_current_max_x = 9.869e-3; sampled_current_max_y = 10*dy; sampled_current_max_z = 3*dz; current_direction = 2; //calculate size of the box //coordinates of the box that encloses all the objects float box_min_x,box_min_y,box_min_z,box_max_x,box_max_y,box_max_z; int pec_num2 = ((pec_opt) != 0) ? pec_num : 0; int brick_num2 = ((brick_opt) != 0) ? 
brick_num : 0; int ob = pec_num2 + brick_num2; float m_min_x[ob],m_min_y[ob],m_min_z[ob],m_max_x[ob],m_max_y[ob],m_max_z[ob]; float size_x, size_y, size_z; for(int i = 0; i < brick_num2; i++){ m_min_x[i] = brick_min_x[i]; m_min_y[i] = brick_min_y[i]; m_min_z[i] = brick_min_z[i]; m_max_x[i] = brick_max_x[i]; m_max_y[i] = brick_max_y[i]; m_max_z[i] = brick_max_z[i]; } for(int i = brick_num2; i < ob; i++){ m_min_x[i] = pec_min_x[i-brick_num2]; m_min_y[i] = pec_min_y[i-brick_num2]; m_min_z[i] = pec_min_z[i-brick_num2]; m_max_x[i] = pec_max_x[i-brick_num2]; m_max_y[i] = pec_max_y[i-brick_num2]; m_max_z[i] = pec_max_z[i-brick_num2]; } box_min_x = m_min_x[0]; box_min_y = m_min_y[0]; box_min_z = m_min_z[0]; box_max_x = m_max_x[0]; box_max_y = m_max_y[0]; box_max_z = m_max_z[0]; for(int i=0; i < ob; i++){ if( m_min_x[i] < box_min_x ){ box_min_x = m_min_x[i]; } if( m_min_y[i] < box_min_y ){ box_min_y = m_min_y[i]; } if( m_min_z[i] < box_min_z ){ box_min_z = m_min_z[i]; } if( m_max_x[i] > box_max_x ){ box_max_x = m_max_x[i]; } if( m_max_y[i] > box_max_y ){ box_max_y = m_max_y[i]; } if( m_max_z[i] > box_max_z ){ box_max_z = m_max_z[i]; } } size_x = box_max_x - box_min_x; size_y = box_max_y - box_min_y; size_z = box_max_z - box_min_z; int X = round(size_x / dx); int Y = round(size_y / dy); int Z = round(size_z / dz); marchingLoop( numdev, eps_0, pi , mu_0, c, dx, dy, dz, dt, X, Y, Z, n_t_steps, pml_x_n, pml_y_n, pml_z_n, pml_x_p, pml_y_p, pml_z_p, air_buff_x_n, air_buff_y_n, air_buff_z_n, air_buff_x_p, air_buff_y_p, air_buff_z_p, source_tp, source_freq, source_amp, source_min_x , source_min_y , source_min_z, source_max_x, source_max_y, source_max_z, source_direction, rs, nc, tau, t_0, brick_min_x,brick_min_y,brick_min_z, brick_max_x,brick_max_y,brick_max_z, brick_sigma_e_x, brick_sigma_e_y, brick_sigma_e_z, brick_eps_r_x, brick_eps_r_y, brick_eps_r_z, brick_sigma_m_x, brick_sigma_m_y, brick_sigma_m_z, brick_mu_r_x, brick_mu_r_y, brick_mu_r_z, brick_opt, brick_num, pec_min_x, pec_min_y, pec_min_z, pec_max_x, pec_max_y, pec_max_z, pec_sigma_e_x, pec_sigma_e_y, pec_sigma_e_z, pec_opt, pec_num, resistor_min_x, resistor_min_y, resistor_min_z, resistor_max_x, resistor_max_y, resistor_max_z, resistor_resist, resistor_direction, resistor_opt, sampled_voltage_min_x, sampled_voltage_min_y, sampled_voltage_min_z, sampled_voltage_max_x, sampled_voltage_max_y, sampled_voltage_max_z, voltage_direction, sampled_current_min_x, sampled_current_min_y, sampled_current_min_z, sampled_current_max_x, sampled_current_max_y, sampled_current_max_z, current_direction); cout << "End of Program" << endl; // total time of execution cout << "\nTotal time elapsed: " << (float)(clock() - tStart) / CLOCKS_PER_SEC << endl; getchar(); return 0; }
d36cc9ff6fb3a31e43a2d59ba20000344165829e.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <cstdlib> #include <iostream> #include <math.h> #include <time.h> #include <cufft.h> #include <cufftXt.h> #include <cuComplex.h> #include <random> #include "fdtdrav.h" //using namespace std; using std::cout; using std::endl; int main(int argc, char ** argv) { const int numdev = atoi(argv[2]); //Measuring total time of execution clock_t tStart; //initializing the clock tStart = clock(); //Define problem main constants float eps_0 = 8.854187817e-12; // permittivity of free space float pi = 3.1415; // pi float mu_0 = (4 * pi)*1e-7; // permeability of free space float c = 1 / sqrt(mu_0*eps_0); // speed of light //Define space Parameters float dx, dy, dz, dt; // cell size x, y and z dimensions // Time step dx = 0.389e-3; dy = 0.4e-3; dz = 0.265e-3 ; float courant_factor; // courant factor courant_factor = 1; //time step size dt = (1 / (c*sqrt((1 / pow(dx, 2)) + (1 / pow(dy, 2)) + (1 / pow(dz, 2))))); dt = courant_factor * dt; // number of time steps int n_t_steps; n_t_steps = atoi(argv[1]); // pml size in each direction int pml_x_n, pml_y_n, pml_z_n, pml_x_p, pml_y_p, pml_z_p; pml_x_n = 10; pml_y_n = 10; pml_z_n = 10; pml_x_p = 10; pml_y_p = 10; pml_z_p = 10; //Air buffer in each direction int air_buff_x_n, air_buff_y_n, air_buff_z_n, air_buff_x_p, air_buff_y_p, air_buff_z_p; air_buff_x_n = 15; air_buff_y_n = 15; air_buff_z_n = 15; air_buff_x_p = 15; air_buff_y_p = 15; air_buff_z_p = 15; //OBJECTS DEFINITION // create 3d structures such as bricks and spheres const int brick_num = 1; float brick_min_x[brick_num], brick_min_y[brick_num], brick_min_z[brick_num], brick_max_x[brick_num], brick_max_y[brick_num], brick_max_z[brick_num]; float brick_sigma_e_x[brick_num], brick_sigma_e_y[brick_num], brick_sigma_e_z[brick_num]; float brick_eps_r_x[brick_num], brick_eps_r_y[brick_num], brick_eps_r_z[brick_num]; float brick_sigma_m_x[brick_num], brick_sigma_m_y[brick_num], brick_sigma_m_z[brick_num]; float brick_mu_r_x[brick_num], brick_mu_r_y[brick_num], brick_mu_r_z[brick_num]; int brick_opt; //opt == 0, do not construct object brick_opt = 1; brick_min_x[0]= 0e-3; brick_min_y[0] = 0e-3; brick_min_z[0] = 0e-3; brick_max_x[0] = 60*dx; brick_max_y[0] = 100*dy; brick_max_z[0] = 3*dz; brick_sigma_e_x[0] = 0.0004; brick_sigma_e_y[0] =0.0004; brick_sigma_e_z[0] = 0.0004; brick_eps_r_x[0] = 2.2; brick_eps_r_y[0] = 2.2; brick_eps_r_z[0] = 2.2; brick_sigma_m_x[0] = 1.2e-38; brick_sigma_m_y[0] = 1.2e-38; brick_sigma_m_z[0] = 1.2e-38; brick_mu_r_x[0] = 1; brick_mu_r_y[0] = 1; brick_mu_r_z[0] = 1; //create 2d structures (pec plates for the most part) //pecs quantity const int pec_num =3; float pec_min_x[pec_num]; float pec_min_y[pec_num]; float pec_min_z[pec_num]; float pec_max_x[pec_num]; float pec_max_y[pec_num]; float pec_max_z[pec_num]; float pec_sigma_e_x[pec_num]; float pec_sigma_e_y[pec_num]; float pec_sigma_e_z[pec_num]; int pec_opt; pec_opt = 1; //PEC 1 pec_min_x[0]= 0; pec_min_y[0] = 0; pec_min_z[0] = 0; pec_max_x[0] = 60*dx; pec_max_y[0] = 100*dy; pec_max_z[0] = 0; pec_sigma_e_x[0] = 1e10; pec_sigma_e_y[0] = 1e10; pec_sigma_e_z[0] = 1e10; //PEC 2 pec_min_x[1]= 7.535e-3; pec_min_y[1] = 0; pec_min_z[1] = 3*dz; pec_max_x[1] = 9.869e-3; pec_max_y[1] = 50*dy; pec_max_z[1] = 3*dz; pec_sigma_e_x[1] = 1e10; pec_sigma_e_y[1] = 1e10; pec_sigma_e_z[1] = 1e10; //PEC 3 pec_min_x[2]= 5.445e-3; pec_min_y[2] = 50*dy; pec_min_z[2] = 3*dz; pec_max_x[2] = 17.895e-3; pec_max_y[2] = (50*dy) + (16e-3); pec_max_z[2] = 3*dz; pec_sigma_e_x[2] = 
1e10; pec_sigma_e_y[2] = 1e10; pec_sigma_e_z[2] = 1e10; //create source //start and end of the source //Source coordinates float source_min_x = 7.535e-3; float source_min_y = 0e-3; float source_min_z = 0e-3; float source_max_x = 9.869e-3; float source_max_y = 0e-3; float source_max_z = 3*dz; //source type : 1 gaussian pulse, 2 gaussian derivative and 3 sinusoidal int source_tp = 1; //source frequency float source_freq = 1e8; //source amplitude float source_amp = 1; //Source resistance float rs = 50; //Source direction int source_direction = 3; //Gaussian Pulse parameters float nc = 20; float tau = (nc*dy) / (2 * c); //tau = 15e-12; float t_0 = 3 * tau; //Create Resistors float resistor_min_x; float resistor_min_y; float resistor_min_z; float resistor_max_x; float resistor_max_y; float resistor_max_z; float resistor_resist; int resistor_direction; int resistor_opt; resistor_opt = 0; resistor_min_x = 7e-3; resistor_min_y = 0; resistor_min_z = 0; resistor_max_x = 8e-3; resistor_max_y = 2e-3; resistor_max_z = 4e-3; resistor_resist = 50; resistor_direction = 1; //Define Output variables //Sample voltage index float sampled_voltage_min_x, sampled_voltage_max_x, sampled_voltage_min_y, sampled_voltage_max_y, sampled_voltage_min_z, sampled_voltage_max_z; int voltage_direction; //Sampling voltage positions sampled_voltage_min_x = 7.535e-3; sampled_voltage_min_y = 10*dy; sampled_voltage_min_z = 0e-3; sampled_voltage_max_x = 9.869e-3; sampled_voltage_max_y = 10*dy; sampled_voltage_max_z = 3*dz; voltage_direction = 3; //Sampled Current Index float sampled_current_min_x, sampled_current_max_x, sampled_current_min_y, sampled_current_max_y, sampled_current_min_z, sampled_current_max_z; int current_direction; //Sampling current positions sampled_current_min_x = 7.535e-3; sampled_current_min_y = 10*dy; sampled_current_min_z = 3*dz; sampled_current_max_x = 9.869e-3; sampled_current_max_y = 10*dy; sampled_current_max_z = 3*dz; current_direction = 2; //calculate size of the box //coordinates of the box that encloses all the objects float box_min_x,box_min_y,box_min_z,box_max_x,box_max_y,box_max_z; int pec_num2 = ((pec_opt) != 0) ? pec_num : 0; int brick_num2 = ((brick_opt) != 0) ? 
brick_num : 0; int ob = pec_num2 + brick_num2; float m_min_x[ob],m_min_y[ob],m_min_z[ob],m_max_x[ob],m_max_y[ob],m_max_z[ob]; float size_x, size_y, size_z; for(int i = 0; i < brick_num2; i++){ m_min_x[i] = brick_min_x[i]; m_min_y[i] = brick_min_y[i]; m_min_z[i] = brick_min_z[i]; m_max_x[i] = brick_max_x[i]; m_max_y[i] = brick_max_y[i]; m_max_z[i] = brick_max_z[i]; } for(int i = brick_num2; i < ob; i++){ m_min_x[i] = pec_min_x[i-brick_num2]; m_min_y[i] = pec_min_y[i-brick_num2]; m_min_z[i] = pec_min_z[i-brick_num2]; m_max_x[i] = pec_max_x[i-brick_num2]; m_max_y[i] = pec_max_y[i-brick_num2]; m_max_z[i] = pec_max_z[i-brick_num2]; } box_min_x = m_min_x[0]; box_min_y = m_min_y[0]; box_min_z = m_min_z[0]; box_max_x = m_max_x[0]; box_max_y = m_max_y[0]; box_max_z = m_max_z[0]; for(int i=0; i < ob; i++){ if( m_min_x[i] < box_min_x ){ box_min_x = m_min_x[i]; } if( m_min_y[i] < box_min_y ){ box_min_y = m_min_y[i]; } if( m_min_z[i] < box_min_z ){ box_min_z = m_min_z[i]; } if( m_max_x[i] > box_max_x ){ box_max_x = m_max_x[i]; } if( m_max_y[i] > box_max_y ){ box_max_y = m_max_y[i]; } if( m_max_z[i] > box_max_z ){ box_max_z = m_max_z[i]; } } size_x = box_max_x - box_min_x; size_y = box_max_y - box_min_y; size_z = box_max_z - box_min_z; int X = round(size_x / dx); int Y = round(size_y / dy); int Z = round(size_z / dz); marchingLoop( numdev, eps_0, pi , mu_0, c, dx, dy, dz, dt, X, Y, Z, n_t_steps, pml_x_n, pml_y_n, pml_z_n, pml_x_p, pml_y_p, pml_z_p, air_buff_x_n, air_buff_y_n, air_buff_z_n, air_buff_x_p, air_buff_y_p, air_buff_z_p, source_tp, source_freq, source_amp, source_min_x , source_min_y , source_min_z, source_max_x, source_max_y, source_max_z, source_direction, rs, nc, tau, t_0, brick_min_x,brick_min_y,brick_min_z, brick_max_x,brick_max_y,brick_max_z, brick_sigma_e_x, brick_sigma_e_y, brick_sigma_e_z, brick_eps_r_x, brick_eps_r_y, brick_eps_r_z, brick_sigma_m_x, brick_sigma_m_y, brick_sigma_m_z, brick_mu_r_x, brick_mu_r_y, brick_mu_r_z, brick_opt, brick_num, pec_min_x, pec_min_y, pec_min_z, pec_max_x, pec_max_y, pec_max_z, pec_sigma_e_x, pec_sigma_e_y, pec_sigma_e_z, pec_opt, pec_num, resistor_min_x, resistor_min_y, resistor_min_z, resistor_max_x, resistor_max_y, resistor_max_z, resistor_resist, resistor_direction, resistor_opt, sampled_voltage_min_x, sampled_voltage_min_y, sampled_voltage_min_z, sampled_voltage_max_x, sampled_voltage_max_y, sampled_voltage_max_z, voltage_direction, sampled_current_min_x, sampled_current_min_y, sampled_current_min_z, sampled_current_max_x, sampled_current_max_y, sampled_current_max_z, current_direction); cout << "End of Program" << endl; // total time of execution cout << "\nTotal time elapsed: " << (float)(clock() - tStart) / CLOCKS_PER_SEC << endl; getchar(); return 0; }
f2038828edf9dc155335260e1ee02de823272782.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void init_i32 (int* vector, int value, int len) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < len) { vector[idx] = value; } }
f2038828edf9dc155335260e1ee02de823272782.cu
#include "includes.h" __global__ void init_i32 (int* vector, int value, int len) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < len) { vector[idx] = value; } }
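A minimal host-side sketch (hypothetical helper, not part of the file) of launching init_i32 above; the grid is rounded up so the idx < len guard in the kernel covers the partial last block.
#include <cuda_runtime.h>

void fill_i32(int *d_vec, int value, int len)
{
    const int blockSize = 256;                               // assumed block size
    const int gridSize = (len + blockSize - 1) / blockSize;  // ceil(len / blockSize)
    init_i32<<<gridSize, blockSize>>>(d_vec, value, len);
}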
4bcd81e297229ca532672a2f722ce4ab1d72f94a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #define N (1024) // CUDA kernel. Each thread takes care of one element of c __global__ void vecAdd(float *a, float *b, float *c) { // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds // if (id < N) c[id] = a[id] + b[id]; } int main( int argc, char* argv[] ) { // Size of vectors //int n = 10000; // Host input vectors float *h_a; float *h_b; //Host output vector float *h_c; // Device input vectors float *d_a; float *d_b; //Device output vector float *d_c; // Size, in bytes, of each vector size_t bytes = N*sizeof(float); // Allocate memory for each vector on host h_a = (float*)malloc(bytes); h_b = (float*)malloc(bytes); h_c = (float*)malloc(bytes); // Allocate memory for each vector on GPU hipMalloc(&d_a, bytes); hipMalloc(&d_b, bytes); hipMalloc(&d_c, bytes); int i; // Initialize vectors on host for( i = 0; i < N; i++ ) { h_a[i] = sin(i)*sin(i); h_b[i] = cos(i)*cos(i); } // Copy host vectors to device hipMemcpy( d_a, h_a, bytes, hipMemcpyHostToDevice); hipMemcpy( d_b, h_b, bytes, hipMemcpyHostToDevice); int blockSize, gridSize; // Number of threads in each thread block blockSize = 1024; // Number of thread blocks in grid gridSize = (int)ceil((float)N/blockSize); // Execute the kernel hipLaunchKernelGGL(( vecAdd), dim3(gridSize), dim3(blockSize), 0, 0, d_a, d_b, d_c); // Copy array back to host hipMemcpy( h_c, d_c, bytes, hipMemcpyDeviceToHost ); // Sum up vector c and print result divided by n, this should equal 1 within error float sum = 0; for(i=0; i< N; i++) { sum += h_c[i]; //printf("h_c[%d]=%f\n",i,h_c[i]); } //printf("Sum is %f\n",sum); printf("final result: %f\n", sum/N); // Release device memory hipFree(d_a); hipFree(d_b); hipFree(d_c); // Release host memory free(h_a); free(h_b); free(h_c); return 0; }
4bcd81e297229ca532672a2f722ce4ab1d72f94a.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #define N (1024) // CUDA kernel. Each thread takes care of one element of c __global__ void vecAdd(float *a, float *b, float *c) { // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds // if (id < N) c[id] = a[id] + b[id]; } int main( int argc, char* argv[] ) { // Size of vectors //int n = 10000; // Host input vectors float *h_a; float *h_b; //Host output vector float *h_c; // Device input vectors float *d_a; float *d_b; //Device output vector float *d_c; // Size, in bytes, of each vector size_t bytes = N*sizeof(float); // Allocate memory for each vector on host h_a = (float*)malloc(bytes); h_b = (float*)malloc(bytes); h_c = (float*)malloc(bytes); // Allocate memory for each vector on GPU cudaMalloc(&d_a, bytes); cudaMalloc(&d_b, bytes); cudaMalloc(&d_c, bytes); int i; // Initialize vectors on host for( i = 0; i < N; i++ ) { h_a[i] = sin(i)*sin(i); h_b[i] = cos(i)*cos(i); } // Copy host vectors to device cudaMemcpy( d_a, h_a, bytes, cudaMemcpyHostToDevice); cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice); int blockSize, gridSize; // Number of threads in each thread block blockSize = 1024; // Number of thread blocks in grid gridSize = (int)ceil((float)N/blockSize); // Execute the kernel vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c); // Copy array back to host cudaMemcpy( h_c, d_c, bytes, cudaMemcpyDeviceToHost ); // Sum up vector c and print result divided by n, this should equal 1 within error float sum = 0; for(i=0; i< N; i++) { sum += h_c[i]; //printf("h_c[%d]=%f\n",i,h_c[i]); } //printf("Sum is %f\n",sum); printf("final result: %f\n", sum/N); // Release device memory cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); // Release host memory free(h_a); free(h_b); free(h_c); return 0; }
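The sample above never checks a CUDA return code after the launch; a minimal sketch of the check it omits (the same idea appears in gpu.cu further down in this collection):
// Hypothetical addition right after the vecAdd launch:
cudaError_t err = cudaGetLastError();                    // catches invalid launch configurations
if (err == cudaSuccess) err = cudaDeviceSynchronize();   // catches asynchronous kernel faults
if (err != cudaSuccess)
    fprintf(stderr, "vecAdd failed: %s\n", cudaGetErrorString(err));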
3cbea43a66746e62431a047138cae30b4fa0ddd3.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2017 NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <optix.h> #include <optixu/optixu_math_namespace.h> #include "helpers.h" #include "random.h" using namespace optix; struct PerRayData_radiance { float3 result; float importance; int depth; }; rtDeclareVariable(float3, eye, , ); rtDeclareVariable(float3, U, , ); rtDeclareVariable(float3, V, , ); rtDeclareVariable(float3, W, , ); rtDeclareVariable(float2, time_range, , ) = { 0.0f, 0.0f }; rtDeclareVariable(float3, bad_color, , ); rtDeclareVariable(float, scene_epsilon, , ); rtBuffer<uchar4, 2> output_buffer; rtBuffer<float4, 2> accum_buffer; rtBuffer<unsigned, 1> timeview_min_max; rtDeclareVariable(int, do_timeview,, ) = 0; rtDeclareVariable(rtObject, top_object, , ); rtDeclareVariable(unsigned int, radiance_ray_type, , ); rtDeclareVariable(unsigned int, frame, , ); rtDeclareVariable(uint2, launch_index, rtLaunchIndex, ); RT_PROGRAM void pinhole_camera() { clock_t t0 = do_timeview ? clock() : 0.0f; size_t2 screen = output_buffer.size(); unsigned int seed = tea<16>(screen.x*launch_index.y+launch_index.x, frame); // Subpixel jitter: send the ray through a different position inside the pixel each frame // to provide antialiasing. float2 subpixel_jitter = frame == 0 ? 
make_float2(0.0f, 0.0f) : make_float2(rnd( seed ) - 0.5f, rnd( seed ) - 0.5f); const float current_time = lerp( time_range.x, time_range.y, rnd( seed ) ); // Set up eye ray float2 d = (make_float2(launch_index) + subpixel_jitter) / make_float2(screen) * 2.f - 1.f; float3 ray_origin = eye; float3 ray_direction = normalize(d.x*U + d.y*V + W); optix::Ray ray(ray_origin, ray_direction, radiance_ray_type, scene_epsilon ); // Shade PerRayData_radiance prd; prd.importance = 1.f; prd.depth = 0; rtTrace(top_object, ray, current_time, prd); // Optionally measure pixel time and replace shaded color with time float3 result = prd.result; if ( do_timeview ) { clock_t t1 = clock(); float pixel_time = max( 0.0f, float(t1 - t0) ); result = make_float3( pixel_time ); } // Accumulate color (shaded result or time) by averaging subpixels float4 acc_val = accum_buffer[launch_index]; if( frame > 0 ) { acc_val = lerp( acc_val, make_float4( result, 0.f), 1.0f / static_cast<float>( frame+1 ) ); } else { acc_val = make_float4( result, 0.f ); } output_buffer[launch_index] = make_color( make_float3( acc_val ) ); accum_buffer[launch_index] = acc_val; // Update atomic min/max time values needed for colormap post-processing. if ( do_timeview ) { // can use integer comparison on float >= 0 atomicMin( &timeview_min_max[0], float_as_int(acc_val.x) ); atomicMax( &timeview_min_max[1], float_as_int(acc_val.x) ); } } rtTextureSampler<uchar4, 2, hipReadModeNormalizedFloat> colormap_sampler; // Map a time value in 0+ range to a color value using a texture lookup. RT_PROGRAM void colormap() { // Assume time value is stored in accum buffer const float4 acc_val = accum_buffer[launch_index]; // Normalize using precomputed min and max const float min_time = __int_as_float( timeview_min_max[0] ); const float max_time = __int_as_float( timeview_min_max[1] ); const float t = ( acc_val.x - min_time ) / ( max_time - min_time ); // Map to color const float4 c = tex2D( colormap_sampler, t, 0.0f ); output_buffer[launch_index] = make_color( make_float3( c ) ); } RT_PROGRAM void exception() { const unsigned int code = rtGetExceptionCode(); rtPrintf( "Caught exception 0x%X at launch index (%d,%d)\n", code, launch_index.x, launch_index.y ); output_buffer[launch_index] = make_color( bad_color ); }
3cbea43a66746e62431a047138cae30b4fa0ddd3.cu
/* * Copyright (c) 2017 NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <optix.h> #include <optixu/optixu_math_namespace.h> #include "helpers.h" #include "random.h" using namespace optix; struct PerRayData_radiance { float3 result; float importance; int depth; }; rtDeclareVariable(float3, eye, , ); rtDeclareVariable(float3, U, , ); rtDeclareVariable(float3, V, , ); rtDeclareVariable(float3, W, , ); rtDeclareVariable(float2, time_range, , ) = { 0.0f, 0.0f }; rtDeclareVariable(float3, bad_color, , ); rtDeclareVariable(float, scene_epsilon, , ); rtBuffer<uchar4, 2> output_buffer; rtBuffer<float4, 2> accum_buffer; rtBuffer<unsigned, 1> timeview_min_max; rtDeclareVariable(int, do_timeview,, ) = 0; rtDeclareVariable(rtObject, top_object, , ); rtDeclareVariable(unsigned int, radiance_ray_type, , ); rtDeclareVariable(unsigned int, frame, , ); rtDeclareVariable(uint2, launch_index, rtLaunchIndex, ); RT_PROGRAM void pinhole_camera() { clock_t t0 = do_timeview ? clock() : 0.0f; size_t2 screen = output_buffer.size(); unsigned int seed = tea<16>(screen.x*launch_index.y+launch_index.x, frame); // Subpixel jitter: send the ray through a different position inside the pixel each frame // to provide antialiasing. float2 subpixel_jitter = frame == 0 ? 
make_float2(0.0f, 0.0f) : make_float2(rnd( seed ) - 0.5f, rnd( seed ) - 0.5f); const float current_time = lerp( time_range.x, time_range.y, rnd( seed ) ); // Set up eye ray float2 d = (make_float2(launch_index) + subpixel_jitter) / make_float2(screen) * 2.f - 1.f; float3 ray_origin = eye; float3 ray_direction = normalize(d.x*U + d.y*V + W); optix::Ray ray(ray_origin, ray_direction, radiance_ray_type, scene_epsilon ); // Shade PerRayData_radiance prd; prd.importance = 1.f; prd.depth = 0; rtTrace(top_object, ray, current_time, prd); // Optionally measure pixel time and replace shaded color with time float3 result = prd.result; if ( do_timeview ) { clock_t t1 = clock(); float pixel_time = max( 0.0f, float(t1 - t0) ); result = make_float3( pixel_time ); } // Accumulate color (shaded result or time) by averaging subpixels float4 acc_val = accum_buffer[launch_index]; if( frame > 0 ) { acc_val = lerp( acc_val, make_float4( result, 0.f), 1.0f / static_cast<float>( frame+1 ) ); } else { acc_val = make_float4( result, 0.f ); } output_buffer[launch_index] = make_color( make_float3( acc_val ) ); accum_buffer[launch_index] = acc_val; // Update atomic min/max time values needed for colormap post-processing. if ( do_timeview ) { // can use integer comparison on float >= 0 atomicMin( &timeview_min_max[0], float_as_int(acc_val.x) ); atomicMax( &timeview_min_max[1], float_as_int(acc_val.x) ); } } rtTextureSampler<uchar4, 2, cudaReadModeNormalizedFloat> colormap_sampler; // Map a time value in 0+ range to a color value using a texture lookup. RT_PROGRAM void colormap() { // Assume time value is stored in accum buffer const float4 acc_val = accum_buffer[launch_index]; // Normalize using precomputed min and max const float min_time = __int_as_float( timeview_min_max[0] ); const float max_time = __int_as_float( timeview_min_max[1] ); const float t = ( acc_val.x - min_time ) / ( max_time - min_time ); // Map to color const float4 c = tex2D( colormap_sampler, t, 0.0f ); output_buffer[launch_index] = make_color( make_float3( c ) ); } RT_PROGRAM void exception() { const unsigned int code = rtGetExceptionCode(); rtPrintf( "Caught exception 0x%X at launch index (%d,%d)\n", code, launch_index.x, launch_index.y ); output_buffer[launch_index] = make_color( bad_color ); }
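The comment "can use integer comparison on float >= 0" in pinhole_camera above relies on a property of IEEE-754: for non-negative floats, the raw bit patterns increase monotonically with the value, so integer atomicMin/atomicMax on the bits compute a float min/max. An illustrative device helper (a sketch, valid only for non-negative inputs):
__device__ bool float_le_via_bits(float x, float y)
{
    // With the sign bit clear, exponent and mantissa bits order
    // non-negative floats exactly as their numeric values do.
    return __float_as_int(x) <= __float_as_int(y);
}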
99e18ba587d87a05b334dfbc2569fef8720664e0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "groupNormalizationPlugin.h" namespace nvinfer1 { namespace plugin { template <typename T, unsigned TPB> __global__ void scaleShiftChannelsInplaceKernel(T* inOut, const int ld, const float* beta, const float* gamma) { // grid is blocks x C x B // ld should be H*W // blockIdx.z = batch // blockIdx.y = channel // blockIdx.x = block per col const T b = beta[blockIdx.y]; const T g = gamma[blockIdx.y]; const int offset = (blockIdx.z * gridDim.y + blockIdx.y) * ld; const int tx = blockIdx.x * TPB + threadIdx.x; if (tx < ld) { inOut[offset + tx] = g * inOut[offset + tx] + b; } } template <typename T> hipError_t scaleShiftChannelsInplace(T* inOut, const int B, const int C, const int channelVolume, const float* beta, const float* gamma, hipStream_t stream) { constexpr int TPB = 256; const int colBlocks = (channelVolume + TPB - 1) / TPB; const dim3 grid(colBlocks, C, B); hipLaunchKernelGGL(( scaleShiftChannelsInplaceKernel<T, TPB>), dim3(grid), dim3(TPB), 0, stream, inOut, channelVolume, beta, gamma); return hipPeekAtLastError(); } template hipError_t scaleShiftChannelsInplace<float>(float* inOut, const int B, const int C, const int channelVolume, const float* beta, const float* gamma, hipStream_t stream); } /* plugin */ } /* nvinfer1 */
99e18ba587d87a05b334dfbc2569fef8720664e0.cu
/* * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "groupNormalizationPlugin.h" namespace nvinfer1 { namespace plugin { template <typename T, unsigned TPB> __global__ void scaleShiftChannelsInplaceKernel(T* inOut, const int ld, const float* beta, const float* gamma) { // grid is blocks x C x B // ld should be H*W // blockIdx.z = batch // blockIdx.y = channel // blockIdx.x = block per col const T b = beta[blockIdx.y]; const T g = gamma[blockIdx.y]; const int offset = (blockIdx.z * gridDim.y + blockIdx.y) * ld; const int tx = blockIdx.x * TPB + threadIdx.x; if (tx < ld) { inOut[offset + tx] = g * inOut[offset + tx] + b; } } template <typename T> cudaError_t scaleShiftChannelsInplace(T* inOut, const int B, const int C, const int channelVolume, const float* beta, const float* gamma, cudaStream_t stream) { constexpr int TPB = 256; const int colBlocks = (channelVolume + TPB - 1) / TPB; const dim3 grid(colBlocks, C, B); scaleShiftChannelsInplaceKernel<T, TPB><<<grid, TPB, 0, stream>>>(inOut, channelVolume, beta, gamma); return cudaPeekAtLastError(); } template cudaError_t scaleShiftChannelsInplace<float>(float* inOut, const int B, const int C, const int channelVolume, const float* beta, const float* gamma, cudaStream_t stream); } /* plugin */ } /* nvinfer1 */
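A minimal usage sketch for scaleShiftChannelsInplace above (shapes and buffer names are assumptions): for an NCHW tensor, channelVolume is H * W, so every (batch, channel) slice of H * W values is rewritten in place as gamma[c] * x + beta[c].
void applyGroupNormScaleShift(float *inOut, const float *beta, const float *gamma,
                              cudaStream_t stream)
{
    const int B = 8, C = 32, H = 56, W = 56;  // hypothetical example shape
    nvinfer1::plugin::scaleShiftChannelsInplace<float>(inOut, B, C, H * W,
                                                       beta, gamma, stream);
}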
8481d3444acb727e3b4e90a8ec15649838df12b6.hip
// !!! This is a file automatically generated by hipify!!! #include "gpu.h" // GPU error checking void checkErrors(const char *label, const char *file, int line) { #ifdef MY_TEST hipError_t err; err = hipDeviceSynchronize(); if (err != hipSuccess) { char *e = (char*) hipGetErrorString(err); printf("CUDA Error: %s (at %s)\nFile:\"%s\"\nLine:\"%d\"\n\n", e, label, file, line); } err = hipGetLastError(); if (err != hipSuccess) { char *e = (char*) hipGetErrorString(err); printf("CUDA Error: %s (at %s)\nFile:\"%s\"\nLine:\"%d\"\n\n", e, label, file, line); fflush(stdout); } #endif } // Function called on error __device__ void device_print_error(char *error, const char *file, int line) { CUPRINTF("Error: %s\nFile: \"%s\"\nLine: %d\n\n", error, file, line); } // Test for NaN // Call syntax: device_test_nan(x, __FILE__, __LINE__); __device__ void device_test_nan(double x, const char *file, int line) { #ifdef MY_TEST if ((x > 1e+30) || (x < -1 * 1e+40)) { CUPRINTF("Error: NaN\nFile:\"%s\"\nLine:\"%d\"\n\n", file, line); } #endif } // Test for positive and not NaN // Call syntax: device_test_positive(x, __FILE__, __LINE__); __device__ void device_test_positive(double x, const char *file, int line) { #ifdef MY_TEST if ((x > 1e+30) || (x < 0)) { CUPRINTF("Error: NaN or X<0\nFile:\"%s\"\nLine:\"%d\"\n\n", file, line); } #endif } // Test that saturations lie in [0;1] // Call syntax: device_test_S(x, __FILE__, __LINE__); __device__ void device_test_S(double S, const char *file, int line) { #ifdef MY_TEST if (S < 0) { CUPRINTF("Error: S<0\nFile:\"%s\"\nLine:\"%d\"\n\n", file, line); } if (S > 1) { CUPRINTF("Error: S>1\nFile:\"%s\"\nLine:\"%d\"\n\n", file, line); } #endif } // Test that velocities lie in [-100;100] // Call syntax: test_u(x, __FILE__, __LINE__); __device__ void device_test_u(double u, const char *file, int line) { #ifdef MY_TEST if (u < -1e8) { CUPRINTF("Error: u<-100\nFile:\"%s\"\nLine:\"%d\"\n\n", file, line); } if (u > 1e8) { CUPRINTF("Error: u>100\nFile:\"%s\"\nLine:\"%d\"\n\n", file, line); } #endif } // Test that densities lie in [0;3000] // Call syntax: test_ro(x, __FILE__, __LINE__); __device__ void device_test_ro(double ro, const char *file, int line) { #ifdef MY_TEST if (ro < 0) { CUPRINTF("Error: ro < 0\nFile:\"%s\"\nLine:\"%d\"\n\n", file, line); } if (ro > 3000) { CUPRINTF("Error: ro > 3000\nFile:\"%s\"\nLine:\"%d\"\n\n", file, line); } #endif } // Checks that the first argument is much larger (in absolute value) than the second // If it is not, a warning is printed __device__ void device_test_arrowhead(double big, double small, const char *file, int line) { #ifdef MY_TEST_1 if (fabs(big / 30) < fabs(small)) { CUPRINTF("Warning: See task parameters.\nFile:\"%s\"\nLine:\"%d\"\n\n", file, line); } #endif }
8481d3444acb727e3b4e90a8ec15649838df12b6.cu
#include "gpu.h" // GPU error checking void checkErrors(const char *label, const char *file, int line) { #ifdef MY_TEST cudaError_t err; err = cudaThreadSynchronize(); if (err != cudaSuccess) { char *e = (char*) cudaGetErrorString(err); printf("CUDA Error: %s (at %s)\nFile:\"%s\"\nLine:\"%d\"\n\n", e, label, file, line); } err = cudaGetLastError(); if (err != cudaSuccess) { char *e = (char*) cudaGetErrorString(err); printf("CUDA Error: %s (at %s)\nFile:\"%s\"\nLine:\"%d\"\n\n", e, label, file, line); fflush(stdout); } #endif } // Function called on error __device__ void device_print_error(char *error, const char *file, int line) { CUPRINTF("Error: %s\nFile: \"%s\"\nLine: %d\n\n", error, file, line); } // Test for NaN // Call syntax: device_test_nan(x, __FILE__, __LINE__); __device__ void device_test_nan(double x, const char *file, int line) { #ifdef MY_TEST if ((x > 1e+30) || (x < -1 * 1e+40)) { CUPRINTF("Error: NaN\nFile:\"%s\"\nLine:\"%d\"\n\n", file, line); } #endif } // Test for positive and not NaN // Call syntax: device_test_positive(x, __FILE__, __LINE__); __device__ void device_test_positive(double x, const char *file, int line) { #ifdef MY_TEST if ((x > 1e+30) || (x < 0)) { CUPRINTF("Error: NaN or X<0\nFile:\"%s\"\nLine:\"%d\"\n\n", file, line); } #endif } // Test that saturations lie in [0;1] // Call syntax: device_test_S(x, __FILE__, __LINE__); __device__ void device_test_S(double S, const char *file, int line) { #ifdef MY_TEST if (S < 0) { CUPRINTF("Error: S<0\nFile:\"%s\"\nLine:\"%d\"\n\n", file, line); } if (S > 1) { CUPRINTF("Error: S>1\nFile:\"%s\"\nLine:\"%d\"\n\n", file, line); } #endif } // Test that velocities lie in [-100;100] // Call syntax: test_u(x, __FILE__, __LINE__); __device__ void device_test_u(double u, const char *file, int line) { #ifdef MY_TEST if (u < -1e8) { CUPRINTF("Error: u<-100\nFile:\"%s\"\nLine:\"%d\"\n\n", file, line); } if (u > 1e8) { CUPRINTF("Error: u>100\nFile:\"%s\"\nLine:\"%d\"\n\n", file, line); } #endif } // Test that densities lie in [0;3000] // Call syntax: test_ro(x, __FILE__, __LINE__); __device__ void device_test_ro(double ro, const char *file, int line) { #ifdef MY_TEST if (ro < 0) { CUPRINTF("Error: ro < 0\nFile:\"%s\"\nLine:\"%d\"\n\n", file, line); } if (ro > 3000) { CUPRINTF("Error: ro > 3000\nFile:\"%s\"\nLine:\"%d\"\n\n", file, line); } #endif } // Checks that the first argument is much larger (in absolute value) than the second // If it is not, a warning is printed __device__ void device_test_arrowhead(double big, double small, const char *file, int line) { #ifdef MY_TEST_1 if (fabs(big / 30) < fabs(small)) { CUPRINTF("Warning: See task parameters.\nFile:\"%s\"\nLine:\"%d\"\n\n", file, line); } #endif }
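A minimal usage sketch for checkErrors above (the kernel name is hypothetical): calling it right after each launch attributes a failure to a readable label plus the exact file and line.
// myKernel<<<grid, block>>>(...);  // hypothetical launch
checkErrors("after myKernel launch", __FILE__, __LINE__);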
c7596a586e89095c1690d35ac74f816cba0b2282.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "yeti_rank_pointwise.cuh" #include "radix_sort_block.cuh" #include <catboost/cuda/cuda_lib/kernel/kernel.cuh> #include <catboost/cuda/cuda_lib/kernel/arch.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <catboost/cuda/cuda_util/kernel/fill.cuh> #include <catboost/cuda/cuda_util/kernel/random_gen.cuh> #include <contrib/libs/cub/cub/block/block_radix_sort.cuh> namespace NKernel { __global__ void RemoveQueryMeansImpl(const int* qids, int size, const float* queryMeans, float* approx) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < size) { approx[tid] -= queryMeans[qids[tid]]; } } void RemoveQueryMeans(const int* qids, int size, const float* queryMeans, float* approx, TCudaStream stream) { const int blockSize = 256; const int numBlocks = (size + blockSize - 1) / blockSize; if (numBlocks > 0) { hipLaunchKernelGGL(( RemoveQueryMeansImpl), dim3(numBlocks), dim3(blockSize), 0, stream , qids, size, queryMeans, approx); } } template <ui32 BLOCK_SIZE> __device__ void YetiRankGradientSingleGroup(ui32 seed, float decaySpeed, ui32 bootstrapIter, const float* __restrict__ approx, const float* __restrict__ relev, const float* __restrict__ querywiseWeights, const int* __restrict__ qids, int size, float* approxes, volatile float* __restrict__ targetDst, volatile float* __restrict__ weightDst) { const int N = 4; ui32 srcIndex[N]; //contains offset and qid of point i16 queryBegin[N]; __shared__ float relevs[BLOCK_SIZE * 4]; // 4K { { int* queryIds = (int*) approxes; const int firstQid = __ldg(qids); for (int k = 0; k < N; k++) { int offset = threadIdx.x + k * BLOCK_SIZE; int qid = offset < size ? qids[offset] : qids[size - 1] + 1; qid -= firstQid; queryIds[offset] = qid; srcIndex[k] = offset; srcIndex[k] |= qid << 10; //first 10 bits point in group, then local qid } int* queryOffsets = (int*) relevs; queryOffsets[threadIdx.x] = size; __syncthreads(); for (int k = 0; k < N; k++) { const int offset = threadIdx.x + k * BLOCK_SIZE; //point id if (!offset || queryIds[offset] != queryIds[offset - 1]) { const int qid = queryIds[offset]; queryOffsets[qid] = offset; } } __syncthreads(); for (int k = 0; k < N; k++) { const int offset = threadIdx.x + k * BLOCK_SIZE; //point id int qid = queryIds[offset]; queryBegin[k] = queryOffsets[qid]; } __syncthreads(); } for (int k = 0; k < 4; k++) { const int offset = threadIdx.x + k * BLOCK_SIZE; relevs[offset] = offset < size ? relev[offset] : 1000.0f; relevs[offset] *= offset < size ? querywiseWeights[offset] : 1.0f; approxes[offset] = offset < size ? __expf(min(approx[offset], 70.0f)) : 1000.0f; } } __syncthreads(); __shared__ ui32 indices[BLOCK_SIZE * N]; for (int t = 0; t < bootstrapIter; t++) { ui32 key[N]; ui32 idx[N] = {srcIndex[0], srcIndex[1], srcIndex[2], srcIndex[3]}; for (int k = 0; k < N; k++) { float val = (idx[k] & 1023) < size ? approxes[idx[k] & 1023] : -1000.0f; const float uni = NextUniformFloat32(&seed); val *= uni / (1.000001f - uni); key[k] = __float_as_int(val); key[k] ^= (key[k] & 0x80000000) ? 
0xffffffff : 0x80000000; } { RadixSortSingleBlock4<BLOCK_SIZE, false, 0, 32>((uint4&)key, (uint4&)idx, indices); RadixSortSingleBlock4<BLOCK_SIZE, true, 10, 10>((uint4&)idx, indices); } //now key[k] is idx of document on position (threadIdx.x + k * BlockSize - queryOffset) in query key[k] >> 10 for (int k = 0; k < N; k++) { const int offset = threadIdx.x + k * BLOCK_SIZE; indices[offset] = idx[k] & 1023; } __syncthreads(); for (int k = 0; k < N; k++) { const int offset = threadIdx.x + k * BLOCK_SIZE; const int idx1 = offset != queryBegin[k] ? (int)indices[offset - 1] : -1; const int idx2 = (int)indices[offset]; const float relev1 = idx1 != -1 ? relevs[idx1] : 0; const float relev2 = relevs[idx2]; const float approx1 = idx1 != -1 ? approxes[idx1] : 0; const float approx2 = approxes[idx2]; const float magicConst = 0.15f; //to make learning rate more comparable with pair classification const float decay = magicConst * powf(decaySpeed, offset - queryBegin[k] - 1); const float pairWeight = decay * fabs(relev1 - relev2) / bootstrapIter; const float ll = pairWeight * (relev1 > relev2 ? approx2 : -approx1) / (approx2 + approx1); // if (idx1 != -1 && idx1 < size) { weightDst[idx1] += pairWeight; targetDst[idx1] += ll; } __syncthreads(); if (idx1 != -1 && idx2 < size) { weightDst[idx2] += pairWeight; targetDst[idx2] += -ll; } __syncthreads(); } __syncthreads(); } }; template <int BLOCK_SIZE> __global__ void YetiRankGradientImpl(int seed, float decaySpeed, ui32 bootstrapIter, const ui32* queryOffsets, volatile int* qidCursor, ui32 qOffsetsBias, ui32 qCount, const int* qids, const float* approx, const float* relev, const float* querywiseWeights, ui32 size, float* targetDst, float* weightDst) { __shared__ float approxes[BLOCK_SIZE * 4]; // 4K while (true) { int taskQid = 0; int* sharedQid = (int*) approxes; int offset = 0; int nextTaskOffset = 0; if (threadIdx.x == 0) { taskQid = qidCursor[0]; while (true) { if (taskQid >= qCount) { break; } offset = queryOffsets[taskQid] - qOffsetsBias; nextTaskOffset = min(offset + 4 * BLOCK_SIZE, size); int nextTaskQid = nextTaskOffset < size ? qids[nextTaskOffset] : qCount; int oldQid = atomicCAS(const_cast<int*>(qidCursor), taskQid, nextTaskQid); if (oldQid == taskQid) { nextTaskOffset = nextTaskQid < qCount ? 
queryOffsets[nextTaskQid] - qOffsetsBias : size; break; } else { taskQid = oldQid; } } } if (threadIdx.x == 0) { sharedQid[0] = taskQid; sharedQid[1] = offset; sharedQid[2] = nextTaskOffset; } __syncthreads(); taskQid = sharedQid[0]; offset = sharedQid[1]; nextTaskOffset = sharedQid[2]; __syncthreads(); if (taskQid >= qCount) { return; } //statisticians will complain :) but we don't need high-quality random generators ui32 taskSeed = 127 * taskQid + 16807 * threadIdx.x + 1; #pragma unroll 3 for (int k = 0; k < 3; ++k) { AdvanceSeed32(&taskSeed); } taskSeed += seed; #pragma unroll 3 for (int k = 0; k < 3; ++k) { AdvanceSeed32(&taskSeed); } YetiRankGradientSingleGroup<BLOCK_SIZE>(taskSeed, decaySpeed, bootstrapIter, approx + offset, relev + offset, querywiseWeights + offset, qids + offset, nextTaskOffset - offset, approxes, targetDst + offset, weightDst + offset); __syncthreads(); } } void YetiRankGradient(ui64 seed, float decaySpeed, ui32 bootstrapIter, const ui32* queryOffsets, int* qidCursor, ui32 qOffsetsBias, ui32 qCount, const int* qids, const float* approx, const float* relev, const float* querywiseWeights, ui32 size, float* targetDst, float* weightDst, TCudaStream stream) { const ui32 maxBlocksPerSm = 4; const ui32 smCount = TArchProps::SMCount(); const int blockSize = 256; FillBuffer(targetDst, 0.0f, size, stream); FillBuffer(weightDst, 0.0f, size, stream); FillBuffer(qidCursor, 0, 1, stream); int cudaSeed = ((ui32)seed) + ((ui32)(seed >> 32)); hipLaunchKernelGGL(( YetiRankGradientImpl<blockSize>), dim3(maxBlocksPerSm * smCount), dim3(blockSize), 0, stream, cudaSeed, decaySpeed, bootstrapIter, queryOffsets, qidCursor, qOffsetsBias, qCount, qids, approx, relev, querywiseWeights, size, targetDst, weightDst); } // }
c7596a586e89095c1690d35ac74f816cba0b2282.cu
#include "yeti_rank_pointwise.cuh" #include "radix_sort_block.cuh" #include <catboost/cuda/cuda_lib/kernel/kernel.cuh> #include <catboost/cuda/cuda_lib/kernel/arch.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <catboost/cuda/cuda_util/kernel/fill.cuh> #include <catboost/cuda/cuda_util/kernel/random_gen.cuh> #include <contrib/libs/cub/cub/block/block_radix_sort.cuh> namespace NKernel { __global__ void RemoveQueryMeansImpl(const int* qids, int size, const float* queryMeans, float* approx) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < size) { approx[tid] -= queryMeans[qids[tid]]; } } void RemoveQueryMeans(const int* qids, int size, const float* queryMeans, float* approx, TCudaStream stream) { const int blockSize = 256; const int numBlocks = (size + blockSize - 1) / blockSize; if (numBlocks > 0) { RemoveQueryMeansImpl<<< numBlocks, blockSize, 0, stream >>> (qids, size, queryMeans, approx); } } template <ui32 BLOCK_SIZE> __device__ void YetiRankGradientSingleGroup(ui32 seed, float decaySpeed, ui32 bootstrapIter, const float* __restrict__ approx, const float* __restrict__ relev, const float* __restrict__ querywiseWeights, const int* __restrict__ qids, int size, float* approxes, volatile float* __restrict__ targetDst, volatile float* __restrict__ weightDst) { const int N = 4; ui32 srcIndex[N]; //contains offset and qid of point i16 queryBegin[N]; __shared__ float relevs[BLOCK_SIZE * 4]; // 4K { { int* queryIds = (int*) approxes; const int firstQid = __ldg(qids); for (int k = 0; k < N; k++) { int offset = threadIdx.x + k * BLOCK_SIZE; int qid = offset < size ? qids[offset] : qids[size - 1] + 1; qid -= firstQid; queryIds[offset] = qid; srcIndex[k] = offset; srcIndex[k] |= qid << 10; //first 10 bits — point in group, then local qid } int* queryOffsets = (int*) relevs; queryOffsets[threadIdx.x] = size; __syncthreads(); for (int k = 0; k < N; k++) { const int offset = threadIdx.x + k * BLOCK_SIZE; //point id if (!offset || queryIds[offset] != queryIds[offset - 1]) { const int qid = queryIds[offset]; queryOffsets[qid] = offset; } } __syncthreads(); for (int k = 0; k < N; k++) { const int offset = threadIdx.x + k * BLOCK_SIZE; //point id int qid = queryIds[offset]; queryBegin[k] = queryOffsets[qid]; } __syncthreads(); } for (int k = 0; k < 4; k++) { const int offset = threadIdx.x + k * BLOCK_SIZE; relevs[offset] = offset < size ? relev[offset] : 1000.0f; relevs[offset] *= offset < size ? querywiseWeights[offset] : 1.0f; approxes[offset] = offset < size ? __expf(min(approx[offset], 70.0f)) : 1000.0f; } } __syncthreads(); __shared__ ui32 indices[BLOCK_SIZE * N]; for (int t = 0; t < bootstrapIter; t++) { ui32 key[N]; ui32 idx[N] = {srcIndex[0], srcIndex[1], srcIndex[2], srcIndex[3]}; for (int k = 0; k < N; k++) { float val = (idx[k] & 1023) < size ? approxes[idx[k] & 1023] : -1000.0f; const float uni = NextUniformFloat32(&seed); val *= uni / (1.000001f - uni); key[k] = __float_as_int(val); key[k] ^= (key[k] & 0x80000000) ? 
0xffffffff : 0x80000000; } { RadixSortSingleBlock4<BLOCK_SIZE, false, 0, 32>((uint4&)key, (uint4&)idx, indices); RadixSortSingleBlock4<BLOCK_SIZE, true, 10, 10>((uint4&)idx, indices); } //now key[k] is idx of document on position (threadIdx.x + k * BlockSize - queryOffset) in query key[k] >> 10 for (int k = 0; k < N; k++) { const int offset = threadIdx.x + k * BLOCK_SIZE; indices[offset] = idx[k] & 1023; } __syncthreads(); for (int k = 0; k < N; k++) { const int offset = threadIdx.x + k * BLOCK_SIZE; const int idx1 = offset != queryBegin[k] ? (int)indices[offset - 1] : -1; const int idx2 = (int)indices[offset]; const float relev1 = idx1 != -1 ? relevs[idx1] : 0; const float relev2 = relevs[idx2]; const float approx1 = idx1 != -1 ? approxes[idx1] : 0; const float approx2 = approxes[idx2]; const float magicConst = 0.15f; //to make learning rate more comparable with pair classification const float decay = magicConst * powf(decaySpeed, offset - queryBegin[k] - 1); const float pairWeight = decay * fabs(relev1 - relev2) / bootstrapIter; const float ll = pairWeight * (relev1 > relev2 ? approx2 : -approx1) / (approx2 + approx1); // if (idx1 != -1 && idx1 < size) { weightDst[idx1] += pairWeight; targetDst[idx1] += ll; } __syncthreads(); if (idx1 != -1 && idx2 < size) { weightDst[idx2] += pairWeight; targetDst[idx2] += -ll; } __syncthreads(); } __syncthreads(); } }; template <int BLOCK_SIZE> __global__ void YetiRankGradientImpl(int seed, float decaySpeed, ui32 bootstrapIter, const ui32* queryOffsets, volatile int* qidCursor, ui32 qOffsetsBias, ui32 qCount, const int* qids, const float* approx, const float* relev, const float* querywiseWeights, ui32 size, float* targetDst, float* weightDst) { __shared__ float approxes[BLOCK_SIZE * 4]; // 4K while (true) { int taskQid = 0; int* sharedQid = (int*) approxes; int offset = 0; int nextTaskOffset = 0; if (threadIdx.x == 0) { taskQid = qidCursor[0]; while (true) { if (taskQid >= qCount) { break; } offset = queryOffsets[taskQid] - qOffsetsBias; nextTaskOffset = min(offset + 4 * BLOCK_SIZE, size); int nextTaskQid = nextTaskOffset < size ? qids[nextTaskOffset] : qCount; int oldQid = atomicCAS(const_cast<int*>(qidCursor), taskQid, nextTaskQid); if (oldQid == taskQid) { nextTaskOffset = nextTaskQid < qCount ? 
queryOffsets[nextTaskQid] - qOffsetsBias : size; break; } else { taskQid = oldQid; } } } if (threadIdx.x == 0) { sharedQid[0] = taskQid; sharedQid[1] = offset; sharedQid[2] = nextTaskOffset; } __syncthreads(); taskQid = sharedQid[0]; offset = sharedQid[1]; nextTaskOffset = sharedQid[2]; __syncthreads(); if (taskQid >= qCount) { return; } //statisticians will complain :) but we don't need high-quality random generators ui32 taskSeed = 127 * taskQid + 16807 * threadIdx.x + 1; #pragma unroll 3 for (int k = 0; k < 3; ++k) { AdvanceSeed32(&taskSeed); } taskSeed += seed; #pragma unroll 3 for (int k = 0; k < 3; ++k) { AdvanceSeed32(&taskSeed); } YetiRankGradientSingleGroup<BLOCK_SIZE>(taskSeed, decaySpeed, bootstrapIter, approx + offset, relev + offset, querywiseWeights + offset, qids + offset, nextTaskOffset - offset, approxes, targetDst + offset, weightDst + offset); __syncthreads(); } } void YetiRankGradient(ui64 seed, float decaySpeed, ui32 bootstrapIter, const ui32* queryOffsets, int* qidCursor, ui32 qOffsetsBias, ui32 qCount, const int* qids, const float* approx, const float* relev, const float* querywiseWeights, ui32 size, float* targetDst, float* weightDst, TCudaStream stream) { const ui32 maxBlocksPerSm = 4; const ui32 smCount = TArchProps::SMCount(); const int blockSize = 256; FillBuffer(targetDst, 0.0f, size, stream); FillBuffer(weightDst, 0.0f, size, stream); FillBuffer(qidCursor, 0, 1, stream); int cudaSeed = ((ui32)seed) + ((ui32)(seed >> 32)); YetiRankGradientImpl<blockSize><<<maxBlocksPerSm * smCount, blockSize, 0, stream>>>(cudaSeed, decaySpeed, bootstrapIter, queryOffsets, qidCursor, qOffsetsBias, qCount, qids, approx, relev, querywiseWeights, size, targetDst, weightDst); } // }
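The heart of YetiRankGradientSingleGroup is the packed-index trick: each document's in-group offset goes into the low 10 bits and its local qid into the bits above, so one sort by perturbed score followed by a stable sort on the qid bits leaves every query's documents grouped together, ordered by sampled score. A CPU analogue of that pack/sort/unpack sequence, illustrative only (the kernel does this with a block-wide radix sort and assumes a query group holds at most 1024 documents so the offset fits in 10 bits; the sort direction is elided here):

#include <algorithm>
#include <cstdint>
#include <vector>

int main()
{
    // idx packs (qid << 10) | offset, as srcIndex does in the kernel.
    std::vector<uint32_t> idx   = {(0u << 10) | 0, (0u << 10) | 1,
                                   (1u << 10) | 2, (1u << 10) | 3};
    std::vector<float>    score = {0.2f, 0.9f, 0.5f, 0.1f};

    // Pass 1: order by (perturbed) score.
    std::stable_sort(idx.begin(), idx.end(), [&](uint32_t a, uint32_t b) {
        return score[a & 1023u] < score[b & 1023u];
    });
    // Pass 2: stable sort on the qid bits; documents stay score-ordered
    // inside each query, which is the order the pairwise decay walk needs.
    std::stable_sort(idx.begin(), idx.end(), [](uint32_t a, uint32_t b) {
        return (a >> 10) < (b >> 10);
    });

    for (uint32_t v : idx) {
        uint32_t qid = v >> 10, offset = v & 1023u;  // unpack, as the kernel does
        (void)qid; (void)offset;
    }
    return 0;
}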
4f0b5e6ca54c6b77911a2f248e422c83f9a1590e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layers/superpixel_centroid_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { __device__ double atomicAddD3(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template <typename Dtype> __global__ void forward_gpu_kernel_accum( const int num_kernels, const Dtype* const sp_data, Dtype* sp_accum_data, Dtype* sp_num_data, const int num, const int channels, const int height, const int width, const int num_output); template <> __global__ void forward_gpu_kernel_accum<float>( const int num_kernels, const float* const sp_data, float* sp_accum_data, float* sp_num_data, const int num, const int channels, const int height, const int width, const int num_output){ CUDA_KERNEL_LOOP(index, num_kernels){ const int n = index / height / channels; const int c = (index / height) % channels; const int h = index % height; const int sp_offset = ((((n * channels) + c) * height) + h) * width; const int accu_offset = (n * channels + c) * num_output * 2; const int num_offset = (n * channels + c) * num_output; for(int w = 0; w < width; w++){ const int sp_id = sp_data[sp_offset + w]; const int accu_idx = accu_offset + sp_id * 2; const int num_idx = num_offset + sp_id; atomicAdd((float*)(sp_accum_data+accu_idx), float(h)); atomicAdd((float*)(sp_accum_data+accu_idx+1), float(w)); atomicAdd((float*)(sp_num_data+num_idx), float(1)); } } } template <> __global__ void forward_gpu_kernel_accum<double>( const int num_kernels, const double* const sp_data, double* sp_accum_data, double* sp_num_data, const int num, const int channels, const int height, const int width, const int num_output){ CUDA_KERNEL_LOOP(index, num_kernels){ const int n = index / height / channels; const int c = (index / height) % channels; const int h = index % height; const int sp_offset = ((((n * channels) + c) * height) + h) * width; const int accu_offset = (n * channels + c) * num_output * 2; const int num_offset = (n * channels + c) * num_output; for(int w = 0; w < width; w++){ const int sp_id = sp_data[sp_offset + w]; const int accu_idx = accu_offset + sp_id * 2; const int num_idx = num_offset + sp_id; atomicAddD3((double*)(sp_accum_data+accu_idx), double(h)); atomicAddD3((double*)(sp_accum_data+accu_idx+1), double(w)); atomicAddD3((double*)(sp_num_data+num_idx), double(1)); } } } template <typename Dtype> __global__ void forward_gpu_kernel_average( const int num_kernels, Dtype* top_data, const Dtype* const sp_accum_data, const Dtype* const sp_num_data, const int num, const int channels, const int num_output, const bool normalize, const Dtype height, const Dtype width){ CUDA_KERNEL_LOOP(index, num_kernels){ const int n = index / channels / num_output; const int c = (index / num_output) % channels; const int h = index % num_output; const int top_offset = ((n * channels + c ) * num_output + h )* 2; const int num_offset = (n * channels + c ) * num_output + h; if(normalize){ top_data[top_offset] = sp_accum_data[top_offset] / sp_num_data[num_offset] / height; top_data[top_offset+1] = sp_accum_data[top_offset+1] / sp_num_data[num_offset] / width; }else{ top_data[top_offset] = sp_accum_data[top_offset] / sp_num_data[num_offset]; 
top_data[top_offset+1] = sp_accum_data[top_offset+1] / sp_num_data[num_offset]; } } } template <typename Dtype> void SuperpixelCentroidLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* sp_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* sp_accum_data = sp_accum_.mutable_gpu_data(); Dtype* sp_num_data = sp_num_.mutable_gpu_data(); const int num = bottom[0]->num(); const int channels = bottom[0]->channels(); const int height = bottom[0]->height(); const int width = bottom[0]->width(); // Check the max id of superpixel map const int max_id = int(caffe_gpu_amax(bottom[0]->count(), sp_data)); if(max_id + 1 != num_output_){ if (check_){ LOG(FATAL) << "The num_output and max superpixel+1 not match: "<<num_output_<<" vs "<<max_id+1; }else{ LOG(WARNING) << "The num_output and max superpixel+1 not match: "<<num_output_<<" vs "<<max_id+1; } } // Clear the sp_accum_ and sp_num_ caffe_gpu_set(sp_accum_.count(), Dtype(0), sp_accum_data); caffe_gpu_set(sp_num_.count(), Dtype(0), sp_num_data); // Accumulate the pixel coordinates const int num_kernels = num * channels * height; hipLaunchKernelGGL(( forward_gpu_kernel_accum<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, sp_data, sp_accum_data, sp_num_data, num, channels, height, width, num_output_); // Average the accumulation to calc the central coordinates const int num_kernels2 = num * channels * num_output_; hipLaunchKernelGGL(( forward_gpu_kernel_average), dim3(CAFFE_GET_BLOCKS(num_kernels2)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels2, top_data, sp_accum_data, sp_num_data, num, channels, num_output_, normalize_, height_, width_); } INSTANTIATE_LAYER_GPU_FUNCS(SuperpixelCentroidLayer); } // namespace caffe
4f0b5e6ca54c6b77911a2f248e422c83f9a1590e.cu
#include <vector> #include "caffe/layers/superpixel_centroid_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { __device__ double atomicAddD3(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template <typename Dtype> __global__ void forward_gpu_kernel_accum( const int num_kernels, const Dtype* const sp_data, Dtype* sp_accum_data, Dtype* sp_num_data, const int num, const int channels, const int height, const int width, const int num_output); template <> __global__ void forward_gpu_kernel_accum<float>( const int num_kernels, const float* const sp_data, float* sp_accum_data, float* sp_num_data, const int num, const int channels, const int height, const int width, const int num_output){ CUDA_KERNEL_LOOP(index, num_kernels){ const int n = index / height / channels; const int c = (index / height) % channels; const int h = index % height; const int sp_offset = ((((n * channels) + c) * height) + h) * width; const int accu_offset = (n * channels + c) * num_output * 2; const int num_offset = (n * channels + c) * num_output; for(int w = 0; w < width; w++){ const int sp_id = sp_data[sp_offset + w]; const int accu_idx = accu_offset + sp_id * 2; const int num_idx = num_offset + sp_id; atomicAdd((float*)(sp_accum_data+accu_idx), float(h)); atomicAdd((float*)(sp_accum_data+accu_idx+1), float(w)); atomicAdd((float*)(sp_num_data+num_idx), float(1)); } } } template <> __global__ void forward_gpu_kernel_accum<double>( const int num_kernels, const double* const sp_data, double* sp_accum_data, double* sp_num_data, const int num, const int channels, const int height, const int width, const int num_output){ CUDA_KERNEL_LOOP(index, num_kernels){ const int n = index / height / channels; const int c = (index / height) % channels; const int h = index % height; const int sp_offset = ((((n * channels) + c) * height) + h) * width; const int accu_offset = (n * channels + c) * num_output * 2; const int num_offset = (n * channels + c) * num_output; for(int w = 0; w < width; w++){ const int sp_id = sp_data[sp_offset + w]; const int accu_idx = accu_offset + sp_id * 2; const int num_idx = num_offset + sp_id; atomicAddD3((double*)(sp_accum_data+accu_idx), double(h)); atomicAddD3((double*)(sp_accum_data+accu_idx+1), double(w)); atomicAddD3((double*)(sp_num_data+num_idx), double(1)); } } } template <typename Dtype> __global__ void forward_gpu_kernel_average( const int num_kernels, Dtype* top_data, const Dtype* const sp_accum_data, const Dtype* const sp_num_data, const int num, const int channels, const int num_output, const bool normalize, const Dtype height, const Dtype width){ CUDA_KERNEL_LOOP(index, num_kernels){ const int n = index / channels / num_output; const int c = (index / num_output) % channels; const int h = index % num_output; const int top_offset = ((n * channels + c ) * num_output + h )* 2; const int num_offset = (n * channels + c ) * num_output + h; if(normalize){ top_data[top_offset] = sp_accum_data[top_offset] / sp_num_data[num_offset] / height; top_data[top_offset+1] = sp_accum_data[top_offset+1] / sp_num_data[num_offset] / width; }else{ top_data[top_offset] = sp_accum_data[top_offset] / sp_num_data[num_offset]; top_data[top_offset+1] = sp_accum_data[top_offset+1] / sp_num_data[num_offset]; } } } 
template <typename Dtype> void SuperpixelCentroidLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* sp_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* sp_accum_data = sp_accum_.mutable_gpu_data(); Dtype* sp_num_data = sp_num_.mutable_gpu_data(); const int num = bottom[0]->num(); const int channels = bottom[0]->channels(); const int height = bottom[0]->height(); const int width = bottom[0]->width(); // Check the max id of superpixel map const int max_id = int(caffe_gpu_amax(bottom[0]->count(), sp_data)); if(max_id + 1 != num_output_){ if (check_){ LOG(FATAL) << "The num_output and max superpixel+1 not match: "<<num_output_<<" vs "<<max_id+1; }else{ LOG(WARNING) << "The num_output and max superpixel+1 not match: "<<num_output_<<" vs "<<max_id+1; } } // Clear the sp_accum_ and sp_num_ caffe_gpu_set(sp_accum_.count(), Dtype(0), sp_accum_data); caffe_gpu_set(sp_num_.count(), Dtype(0), sp_num_data); // Accumulate the pixel coordinates const int num_kernels = num * channels * height; forward_gpu_kernel_accum<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, sp_data, sp_accum_data, sp_num_data, num, channels, height, width, num_output_); // Average the accumulation to calc the central coordinates const int num_kernels2 = num * channels * num_output_; forward_gpu_kernel_average<<<CAFFE_GET_BLOCKS(num_kernels2), CAFFE_CUDA_NUM_THREADS>>>( num_kernels2, top_data, sp_accum_data, sp_num_data, num, channels, num_output_, normalize_, height_, width_); } INSTANTIATE_LAYER_GPU_FUNCS(SuperpixelCentroidLayer); } // namespace caffe
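For reference, forward_gpu_kernel_accum and forward_gpu_kernel_average above jointly compute per-superpixel centroids. A single-image, single-channel CPU equivalent of the same math, with illustrative names only (it assumes every id in sp_map lies in [0, num_output)):

#include <vector>

struct Centroid { double h_sum = 0, w_sum = 0, count = 0; };

std::vector<Centroid> centroids(const std::vector<int> &sp_map,
                                int height, int width, int num_output,
                                bool normalize)
{
    std::vector<Centroid> acc(num_output);
    for (int h = 0; h < height; ++h)
        for (int w = 0; w < width; ++w) {
            Centroid &c = acc[sp_map[h * width + w]];
            c.h_sum += h; c.w_sum += w; c.count += 1;  // what the atomics accumulate
        }
    for (Centroid &c : acc) {
        if (c.count > 0) {                             // forward_gpu_kernel_average
            c.h_sum /= c.count; c.w_sum /= c.count;
            if (normalize) { c.h_sum /= height; c.w_sum /= width; }
        }
    }
    return acc;
}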
8660a67b42f48023698a35ffb8a0cba1284a3da1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/device/cuda_util.h" #include "oneflow/core/framework/framework.h" #include "oneflow/core/kernel/kernel_util.cuh" #include "oneflow/core/common/data_type.h" #include "oneflow/core/kernel/util/cuda_half_util.h" #include "oneflow/core/cuda/atomic.cuh" #include "oneflow/core/operator/operator_util.h" #include "oneflow/user/utils/pool_util.h" #include <algorithm> #include <cfloat> #include <cmath> namespace oneflow { namespace user_op { #define START_IND(a, b, c) (int)::floor((float)(a * c) / b) #define END_IND(a, b, c) (int)::ceil((float)((a + 1) * c) / b) #define START_IND_INT(a, b, c) ((a * c) / b) #define END_IND_INT(a, b, c) (((a + 1) * c + b - 1) / b) template<typename T> __global__ void InitPtr(int elements, T* ptr) { int gid = (blockDim.x * blockIdx.x) + threadIdx.x; int step = gridDim.x * blockDim.x; while (gid < elements) { ptr[gid] = static_cast<T>(0); gid += step; } } inline Shape GetShape5D(const Shape& shape, const std::string& data_format, int32_t dim) { FixedDimVector shape_3d = {GetInDim(shape, data_format, 0, dim), GetInDim(shape, data_format, 1, dim), GetInDim(shape, data_format, 2, dim)}; return Shape({shape.At(0), shape.At(1), shape_3d.at(0), shape_3d.at(1), shape_3d.at(2)}); } template<typename T> __global__ void AdaptiveAvgPoolCudaKernel(const T* input, T* output, int num_elems, int in_d, int in_h, int in_w, int out_d, int out_h, int out_w) { const int out_panel_size = out_d * out_h * out_w; const int in_panel_size = in_d * in_h * in_w; CUDA_1D_KERNEL_LOOP(idx, num_elems) { // TODO (Tianyu): Replace following codes with 'NdIndexOffsetHelper' int bc_idx = idx / out_panel_size; int out_d_idx = (idx % out_panel_size) / out_w / out_h; int out_h_idx = (idx % out_panel_size) % (out_h * out_w) / out_w; int out_w_idx = (idx % out_panel_size) % (out_h * out_w) % out_w; int in_start_d = START_IND(out_d_idx, out_d, in_d); int in_end_d = END_IND(out_d_idx, out_d, in_d); int k_d = in_end_d - in_start_d; int in_start_h = START_IND(out_h_idx, out_h, in_h); int in_end_h = END_IND(out_h_idx, out_h, in_h); int k_h = in_end_h - in_start_h; int in_start_w = START_IND(out_w_idx, out_w, in_w); int in_end_w = END_IND(out_w_idx, out_w, in_w); int k_w = in_end_w - in_start_w; const T* in_ptr = input + bc_idx * in_panel_size + in_start_d * in_h * in_w + in_start_h * in_w + in_start_w; T sum = static_cast<T>(0); for (int id = 0; id < k_d; ++id) { for (int ih = 0; ih < k_h; ++ih) { for (int iw = 0; iw < k_w; ++iw) { T val = *(in_ptr + ih * in_w + iw); sum += val; } } in_ptr += in_h * in_w; // next input depth } // Update output output[idx] = sum / k_d / k_h / k_w; } } template<typename T> __global__ void AdaptiveAvgPoolGradCudaKernel(T* input, const T* output, int num_elems, int in_d, int in_h, int in_w, int out_d, int out_h, int out_w) { const int out_panel_size = out_d * out_h * out_w; const int in_panel_size = in_d * 
in_h * in_w; CUDA_1D_KERNEL_LOOP(idx, num_elems) { // TODO (Tianyu): Replace following codes with 'NdIndexOffsetHelper' int bc_idx = idx / out_panel_size; int out_d_idx = (idx % out_panel_size) / out_w / out_h; int out_h_idx = (idx % out_panel_size) % (out_h * out_w) / out_w; int out_w_idx = (idx % out_panel_size) % (out_h * out_w) % out_w; int in_start_d = START_IND(out_d_idx, out_d, in_d); int in_end_d = END_IND(out_d_idx, out_d, in_d); int k_d = in_end_d - in_start_d; int in_start_h = START_IND(out_h_idx, out_h, in_h); int in_end_h = END_IND(out_h_idx, out_h, in_h); int k_h = in_end_h - in_start_h; int in_start_w = START_IND(out_w_idx, out_w, in_w); int in_end_w = END_IND(out_w_idx, out_w, in_w); int k_w = in_end_w - in_start_w; const T grad_delta = output[idx] / k_d / k_h / k_w; T* input_ptr = input + bc_idx * in_panel_size + in_start_d * in_h * in_w + in_start_h * in_w + in_start_w; for (int id = 0; id < k_d; ++id) { for (int ih = 0; ih < k_h; ++ih) { for (int iw = 0; iw < k_w; ++iw) { // TODO (Tianyu): Use 'atmoic::Add' when necessary cuda::atomic::Add(input_ptr + ih * in_w + iw, grad_delta); } } input_ptr += in_h * in_w; // next input depth } } } template<typename T> void AvgForwardCompute(KernelComputeContext* ctx, const int32_t& dim) { const Tensor* in_tensor = ctx->Tensor4ArgNameAndIndex("x", 0); Tensor* out_tensor = ctx->Tensor4ArgNameAndIndex("y", 0); const T* in_ptr = in_tensor->dptr<T>(); T* out_ptr = out_tensor->mut_dptr<T>(); const Shape& x_shape = ctx->TensorDesc4ArgNameAndIndex("x", 0)->shape(); const Shape& y_shape = ctx->TensorDesc4ArgNameAndIndex("y", 0)->shape(); // TODO (Tianyu): Support 'channels_last' std::string data_format = "channels_first"; const Shape& in = GetShape5D(x_shape, data_format, dim); const Shape& out = GetShape5D(y_shape, data_format, dim); const int out_elems = out_tensor->shape().elem_cnt(); RUN_CUDA_KERNEL((AdaptiveAvgPoolCudaKernel<T>), ctx->device_ctx(), out_elems, in_ptr, out_ptr, out_elems, in.At(2), in.At(3), in.At(4), out.At(2), out.At(3), out.At(4)); } template<typename T> void AvgBackwardCompute(KernelComputeContext* ctx, const int32_t& dim) { const Tensor* out_tensor = ctx->Tensor4ArgNameAndIndex("dy", 0); Tensor* in_tensor = ctx->Tensor4ArgNameAndIndex("dx", 0); const T* out_ptr = out_tensor->dptr<T>(); T* in_ptr = in_tensor->mut_dptr<T>(); const Shape& dx_shape = ctx->TensorDesc4ArgNameAndIndex("dx", 0)->shape(); const Shape& dy_shape = ctx->TensorDesc4ArgNameAndIndex("dy", 0)->shape(); // TODO (Tianyu): Support 'channels_last' std::string data_format = "channels_first"; const Shape& in = GetShape5D(dx_shape, data_format, dim); const Shape& out = GetShape5D(dy_shape, data_format, dim); const int in_elems = in_tensor->shape().elem_cnt(); const int out_elems = out_tensor->shape().elem_cnt(); RUN_CUDA_KERNEL((InitPtr<T>), ctx->device_ctx(), in_elems, in_elems, in_ptr); RUN_CUDA_KERNEL((AdaptiveAvgPoolGradCudaKernel<T>), ctx->device_ctx(), out_elems, in_ptr, out_ptr, out_elems, in.At(2), in.At(3), in.At(4), out.At(2), out.At(3), out.At(4)); } template<DeviceType device_type, typename T> class GpuAdaptiveAvgPool1dKernel final : public OpKernel { public: GpuAdaptiveAvgPool1dKernel() = default; ~GpuAdaptiveAvgPool1dKernel() = default; private: void Compute(KernelComputeContext* ctx) const override { AvgForwardCompute<T>(ctx, 1); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<DeviceType device_type, typename T> class GpuAdaptiveAvgPool2dKernel final : public OpKernel { public: 
GpuAdaptiveAvgPool2dKernel() = default; ~GpuAdaptiveAvgPool2dKernel() = default; private: void Compute(KernelComputeContext* ctx) const override { AvgForwardCompute<T>(ctx, 2); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<DeviceType device_type, typename T> class GpuAdaptiveAvgPool3dKernel final : public OpKernel { public: GpuAdaptiveAvgPool3dKernel() = default; ~GpuAdaptiveAvgPool3dKernel() = default; private: void Compute(KernelComputeContext* ctx) const override { AvgForwardCompute<T>(ctx, 3); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<DeviceType device_type, typename T> class GpuAdaptiveAvgPool1dGradKernel final : public OpKernel { public: GpuAdaptiveAvgPool1dGradKernel() = default; ~GpuAdaptiveAvgPool1dGradKernel() = default; private: void Compute(KernelComputeContext* ctx) const override { AvgBackwardCompute<T>(ctx, 1); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<DeviceType device_type, typename T> class GpuAdaptiveAvgPool2dGradKernel final : public OpKernel { public: GpuAdaptiveAvgPool2dGradKernel() = default; ~GpuAdaptiveAvgPool2dGradKernel() = default; private: void Compute(KernelComputeContext* ctx) const override { AvgBackwardCompute<T>(ctx, 2); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<DeviceType device_type, typename T> class GpuAdaptiveAvgPool3dGradKernel final : public OpKernel { public: GpuAdaptiveAvgPool3dGradKernel() = default; ~GpuAdaptiveAvgPool3dGradKernel() = default; private: void Compute(KernelComputeContext* ctx) const override { AvgBackwardCompute<T>(ctx, 3); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_GPU_ADAPTIVE_AVGPOOL_KERNEL(device, dtype) \ REGISTER_USER_KERNEL("adaptive_avg_pool1d") \ .SetCreateFn<GpuAdaptiveAvgPool1dKernel<device, dtype>>() \ .SetIsMatchedHob((HobDeviceTag() == device) \ & (HobDataType("y", 0) == GetDataType<dtype>::value)); \ REGISTER_USER_KERNEL("adaptive_avg_pool2d") \ .SetCreateFn<GpuAdaptiveAvgPool2dKernel<device, dtype>>() \ .SetIsMatchedHob((HobDeviceTag() == device) \ & (HobDataType("y", 0) == GetDataType<dtype>::value)); \ REGISTER_USER_KERNEL("adaptive_avg_pool3d") \ .SetCreateFn<GpuAdaptiveAvgPool3dKernel<device, dtype>>() \ .SetIsMatchedHob((HobDeviceTag() == device) \ & (HobDataType("y", 0) == GetDataType<dtype>::value)); REGISTER_GPU_ADAPTIVE_AVGPOOL_KERNEL(DeviceType::kGPU, float); REGISTER_GPU_ADAPTIVE_AVGPOOL_KERNEL(DeviceType::kGPU, double); REGISTER_GPU_ADAPTIVE_AVGPOOL_KERNEL(DeviceType::kGPU, int); #define REGISTER_GPU_ADAPTIVE_AVGPOOL_BACKWARD_KERNEL(device, dtype) \ REGISTER_USER_KERNEL("adaptive_avg_pool1d_grad") \ .SetCreateFn<GpuAdaptiveAvgPool1dGradKernel<device, dtype>>() \ .SetIsMatchedHob((HobDeviceTag() == device) \ & (HobDataType("dx", 0) == GetDataType<dtype>::value)); \ REGISTER_USER_KERNEL("adaptive_avg_pool2d_grad") \ .SetCreateFn<GpuAdaptiveAvgPool2dGradKernel<device, dtype>>() \ .SetIsMatchedHob((HobDeviceTag() == device) \ & (HobDataType("dx", 0) == GetDataType<dtype>::value)); \ REGISTER_USER_KERNEL("adaptive_avg_pool3d_grad") \ .SetCreateFn<GpuAdaptiveAvgPool3dGradKernel<device, dtype>>() \ .SetIsMatchedHob((HobDeviceTag() == device) \ & (HobDataType("dx", 0) == GetDataType<dtype>::value)); REGISTER_GPU_ADAPTIVE_AVGPOOL_BACKWARD_KERNEL(DeviceType::kGPU, float); REGISTER_GPU_ADAPTIVE_AVGPOOL_BACKWARD_KERNEL(DeviceType::kGPU, double); 
REGISTER_GPU_ADAPTIVE_AVGPOOL_BACKWARD_KERNEL(DeviceType::kGPU, int); } // namespace user_op } // namespace oneflow
8660a67b42f48023698a35ffb8a0cba1284a3da1.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/device/cuda_util.h" #include "oneflow/core/framework/framework.h" #include "oneflow/core/kernel/kernel_util.cuh" #include "oneflow/core/common/data_type.h" #include "oneflow/core/kernel/util/cuda_half_util.h" #include "oneflow/core/cuda/atomic.cuh" #include "oneflow/core/operator/operator_util.h" #include "oneflow/user/utils/pool_util.h" #include <algorithm> #include <cfloat> #include <cmath> namespace oneflow { namespace user_op { #define START_IND(a, b, c) (int)std::floor((float)(a * c) / b) #define END_IND(a, b, c) (int)std::ceil((float)((a + 1) * c) / b) #define START_IND_INT(a, b, c) ((a * c) / b) #define END_IND_INT(a, b, c) (((a + 1) * c + b - 1) / b) template<typename T> __global__ void InitPtr(int elements, T* ptr) { int gid = (blockDim.x * blockIdx.x) + threadIdx.x; int step = gridDim.x * blockDim.x; while (gid < elements) { ptr[gid] = static_cast<T>(0); gid += step; } } inline Shape GetShape5D(const Shape& shape, const std::string& data_format, int32_t dim) { FixedDimVector shape_3d = {GetInDim(shape, data_format, 0, dim), GetInDim(shape, data_format, 1, dim), GetInDim(shape, data_format, 2, dim)}; return Shape({shape.At(0), shape.At(1), shape_3d.at(0), shape_3d.at(1), shape_3d.at(2)}); } template<typename T> __global__ void AdaptiveAvgPoolCudaKernel(const T* input, T* output, int num_elems, int in_d, int in_h, int in_w, int out_d, int out_h, int out_w) { const int out_panel_size = out_d * out_h * out_w; const int in_panel_size = in_d * in_h * in_w; CUDA_1D_KERNEL_LOOP(idx, num_elems) { // TODO (Tianyu): Replace following codes with 'NdIndexOffsetHelper' int bc_idx = idx / out_panel_size; int out_d_idx = (idx % out_panel_size) / out_w / out_h; int out_h_idx = (idx % out_panel_size) % (out_h * out_w) / out_w; int out_w_idx = (idx % out_panel_size) % (out_h * out_w) % out_w; int in_start_d = START_IND(out_d_idx, out_d, in_d); int in_end_d = END_IND(out_d_idx, out_d, in_d); int k_d = in_end_d - in_start_d; int in_start_h = START_IND(out_h_idx, out_h, in_h); int in_end_h = END_IND(out_h_idx, out_h, in_h); int k_h = in_end_h - in_start_h; int in_start_w = START_IND(out_w_idx, out_w, in_w); int in_end_w = END_IND(out_w_idx, out_w, in_w); int k_w = in_end_w - in_start_w; const T* in_ptr = input + bc_idx * in_panel_size + in_start_d * in_h * in_w + in_start_h * in_w + in_start_w; T sum = static_cast<T>(0); for (int id = 0; id < k_d; ++id) { for (int ih = 0; ih < k_h; ++ih) { for (int iw = 0; iw < k_w; ++iw) { T val = *(in_ptr + ih * in_w + iw); sum += val; } } in_ptr += in_h * in_w; // next input depth } // Update output output[idx] = sum / k_d / k_h / k_w; } } template<typename T> __global__ void AdaptiveAvgPoolGradCudaKernel(T* input, const T* output, int num_elems, int in_d, int in_h, int in_w, int out_d, int out_h, int out_w) { const int out_panel_size = out_d * out_h * out_w; const int in_panel_size = in_d * in_h * in_w; CUDA_1D_KERNEL_LOOP(idx, num_elems) { // TODO (Tianyu): Replace 
following codes with 'NdIndexOffsetHelper' int bc_idx = idx / out_panel_size; int out_d_idx = (idx % out_panel_size) / out_w / out_h; int out_h_idx = (idx % out_panel_size) % (out_h * out_w) / out_w; int out_w_idx = (idx % out_panel_size) % (out_h * out_w) % out_w; int in_start_d = START_IND(out_d_idx, out_d, in_d); int in_end_d = END_IND(out_d_idx, out_d, in_d); int k_d = in_end_d - in_start_d; int in_start_h = START_IND(out_h_idx, out_h, in_h); int in_end_h = END_IND(out_h_idx, out_h, in_h); int k_h = in_end_h - in_start_h; int in_start_w = START_IND(out_w_idx, out_w, in_w); int in_end_w = END_IND(out_w_idx, out_w, in_w); int k_w = in_end_w - in_start_w; const T grad_delta = output[idx] / k_d / k_h / k_w; T* input_ptr = input + bc_idx * in_panel_size + in_start_d * in_h * in_w + in_start_h * in_w + in_start_w; for (int id = 0; id < k_d; ++id) { for (int ih = 0; ih < k_h; ++ih) { for (int iw = 0; iw < k_w; ++iw) { // TODO (Tianyu): Use 'atmoic::Add' when necessary cuda::atomic::Add(input_ptr + ih * in_w + iw, grad_delta); } } input_ptr += in_h * in_w; // next input depth } } } template<typename T> void AvgForwardCompute(KernelComputeContext* ctx, const int32_t& dim) { const Tensor* in_tensor = ctx->Tensor4ArgNameAndIndex("x", 0); Tensor* out_tensor = ctx->Tensor4ArgNameAndIndex("y", 0); const T* in_ptr = in_tensor->dptr<T>(); T* out_ptr = out_tensor->mut_dptr<T>(); const Shape& x_shape = ctx->TensorDesc4ArgNameAndIndex("x", 0)->shape(); const Shape& y_shape = ctx->TensorDesc4ArgNameAndIndex("y", 0)->shape(); // TODO (Tianyu): Support 'channels_last' std::string data_format = "channels_first"; const Shape& in = GetShape5D(x_shape, data_format, dim); const Shape& out = GetShape5D(y_shape, data_format, dim); const int out_elems = out_tensor->shape().elem_cnt(); RUN_CUDA_KERNEL((AdaptiveAvgPoolCudaKernel<T>), ctx->device_ctx(), out_elems, in_ptr, out_ptr, out_elems, in.At(2), in.At(3), in.At(4), out.At(2), out.At(3), out.At(4)); } template<typename T> void AvgBackwardCompute(KernelComputeContext* ctx, const int32_t& dim) { const Tensor* out_tensor = ctx->Tensor4ArgNameAndIndex("dy", 0); Tensor* in_tensor = ctx->Tensor4ArgNameAndIndex("dx", 0); const T* out_ptr = out_tensor->dptr<T>(); T* in_ptr = in_tensor->mut_dptr<T>(); const Shape& dx_shape = ctx->TensorDesc4ArgNameAndIndex("dx", 0)->shape(); const Shape& dy_shape = ctx->TensorDesc4ArgNameAndIndex("dy", 0)->shape(); // TODO (Tianyu): Support 'channels_last' std::string data_format = "channels_first"; const Shape& in = GetShape5D(dx_shape, data_format, dim); const Shape& out = GetShape5D(dy_shape, data_format, dim); const int in_elems = in_tensor->shape().elem_cnt(); const int out_elems = out_tensor->shape().elem_cnt(); RUN_CUDA_KERNEL((InitPtr<T>), ctx->device_ctx(), in_elems, in_elems, in_ptr); RUN_CUDA_KERNEL((AdaptiveAvgPoolGradCudaKernel<T>), ctx->device_ctx(), out_elems, in_ptr, out_ptr, out_elems, in.At(2), in.At(3), in.At(4), out.At(2), out.At(3), out.At(4)); } template<DeviceType device_type, typename T> class GpuAdaptiveAvgPool1dKernel final : public OpKernel { public: GpuAdaptiveAvgPool1dKernel() = default; ~GpuAdaptiveAvgPool1dKernel() = default; private: void Compute(KernelComputeContext* ctx) const override { AvgForwardCompute<T>(ctx, 1); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<DeviceType device_type, typename T> class GpuAdaptiveAvgPool2dKernel final : public OpKernel { public: GpuAdaptiveAvgPool2dKernel() = default; ~GpuAdaptiveAvgPool2dKernel() = default; private: void 
Compute(KernelComputeContext* ctx) const override { AvgForwardCompute<T>(ctx, 2); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<DeviceType device_type, typename T> class GpuAdaptiveAvgPool3dKernel final : public OpKernel { public: GpuAdaptiveAvgPool3dKernel() = default; ~GpuAdaptiveAvgPool3dKernel() = default; private: void Compute(KernelComputeContext* ctx) const override { AvgForwardCompute<T>(ctx, 3); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<DeviceType device_type, typename T> class GpuAdaptiveAvgPool1dGradKernel final : public OpKernel { public: GpuAdaptiveAvgPool1dGradKernel() = default; ~GpuAdaptiveAvgPool1dGradKernel() = default; private: void Compute(KernelComputeContext* ctx) const override { AvgBackwardCompute<T>(ctx, 1); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<DeviceType device_type, typename T> class GpuAdaptiveAvgPool2dGradKernel final : public OpKernel { public: GpuAdaptiveAvgPool2dGradKernel() = default; ~GpuAdaptiveAvgPool2dGradKernel() = default; private: void Compute(KernelComputeContext* ctx) const override { AvgBackwardCompute<T>(ctx, 2); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<DeviceType device_type, typename T> class GpuAdaptiveAvgPool3dGradKernel final : public OpKernel { public: GpuAdaptiveAvgPool3dGradKernel() = default; ~GpuAdaptiveAvgPool3dGradKernel() = default; private: void Compute(KernelComputeContext* ctx) const override { AvgBackwardCompute<T>(ctx, 3); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_GPU_ADAPTIVE_AVGPOOL_KERNEL(device, dtype) \ REGISTER_USER_KERNEL("adaptive_avg_pool1d") \ .SetCreateFn<GpuAdaptiveAvgPool1dKernel<device, dtype>>() \ .SetIsMatchedHob((HobDeviceTag() == device) \ & (HobDataType("y", 0) == GetDataType<dtype>::value)); \ REGISTER_USER_KERNEL("adaptive_avg_pool2d") \ .SetCreateFn<GpuAdaptiveAvgPool2dKernel<device, dtype>>() \ .SetIsMatchedHob((HobDeviceTag() == device) \ & (HobDataType("y", 0) == GetDataType<dtype>::value)); \ REGISTER_USER_KERNEL("adaptive_avg_pool3d") \ .SetCreateFn<GpuAdaptiveAvgPool3dKernel<device, dtype>>() \ .SetIsMatchedHob((HobDeviceTag() == device) \ & (HobDataType("y", 0) == GetDataType<dtype>::value)); REGISTER_GPU_ADAPTIVE_AVGPOOL_KERNEL(DeviceType::kGPU, float); REGISTER_GPU_ADAPTIVE_AVGPOOL_KERNEL(DeviceType::kGPU, double); REGISTER_GPU_ADAPTIVE_AVGPOOL_KERNEL(DeviceType::kGPU, int); #define REGISTER_GPU_ADAPTIVE_AVGPOOL_BACKWARD_KERNEL(device, dtype) \ REGISTER_USER_KERNEL("adaptive_avg_pool1d_grad") \ .SetCreateFn<GpuAdaptiveAvgPool1dGradKernel<device, dtype>>() \ .SetIsMatchedHob((HobDeviceTag() == device) \ & (HobDataType("dx", 0) == GetDataType<dtype>::value)); \ REGISTER_USER_KERNEL("adaptive_avg_pool2d_grad") \ .SetCreateFn<GpuAdaptiveAvgPool2dGradKernel<device, dtype>>() \ .SetIsMatchedHob((HobDeviceTag() == device) \ & (HobDataType("dx", 0) == GetDataType<dtype>::value)); \ REGISTER_USER_KERNEL("adaptive_avg_pool3d_grad") \ .SetCreateFn<GpuAdaptiveAvgPool3dGradKernel<device, dtype>>() \ .SetIsMatchedHob((HobDeviceTag() == device) \ & (HobDataType("dx", 0) == GetDataType<dtype>::value)); REGISTER_GPU_ADAPTIVE_AVGPOOL_BACKWARD_KERNEL(DeviceType::kGPU, float); REGISTER_GPU_ADAPTIVE_AVGPOOL_BACKWARD_KERNEL(DeviceType::kGPU, double); REGISTER_GPU_ADAPTIVE_AVGPOOL_BACKWARD_KERNEL(DeviceType::kGPU, int); } // namespace user_op } // namespace oneflow
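The adaptive bins above fall directly out of START_IND/END_IND; replaying the macro arithmetic on the host for a 1-D case (in = 10, out = 4) makes the possibly-overlapping windows easy to verify by hand. A hypothetical standalone check, not part of the kernel:

#include <cmath>
#include <cstdio>

int main()
{
    const int in = 10, out = 4;
    for (int o = 0; o < out; ++o) {
        int start = (int)std::floor((float)(o * in) / out);       // START_IND
        int end   = (int)std::ceil((float)((o + 1) * in) / out);  // END_IND
        printf("out %d <- input [%d, %d)  (k = %d)\n", o, start, end, end - start);
        // prints: out 0 <- [0,3), out 1 <- [2,5), out 2 <- [5,8), out 3 <- [7,10)
    }
    return 0;
}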
6e62bb4a87fb56f10f22f38bbb9cafc4d39f386a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @generated from sparse/blas/zgeelltmv.cu, normal z -> d, Wed Jan 2 14:18:53 2019 */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 // ELL SpMV kernel //Michael Garland template<bool betazero> __global__ void dgeelltmv_kernel( int num_rows, int num_cols, int num_cols_per_row, double alpha, double * dval, magma_index_t * dcolind, double * dx, double beta, double * dy) { int row = blockDim.x * blockIdx.x + threadIdx.x; if (row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row; n++ ) { int col = dcolind [ num_rows * n + row ]; double val = dval [ num_rows * n + row ]; //if ( val != MAGMA_D_ZERO ) dot += val * dx[col ]; } if (betazero) { dy[ row ] = dot * alpha; } else { dy[ row ] = dot * alpha + beta * dy [ row ]; } } } // shifted ELL SpMV kernel //Michael Garland __global__ void dgeelltmv_kernel_shift( int num_rows, int num_cols, int num_cols_per_row, double alpha, double lambda, double * dval, magma_index_t * dcolind, double * dx, double beta, int offset, int blocksize, magma_index_t * addrows, double * dy) { int row = blockDim.x * blockIdx.x + threadIdx.x; if (row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row; n++ ) { int col = dcolind [ num_rows * n + row ]; double val = dval [ num_rows * n + row ]; if ( val != 0) dot += val * dx[col ]; } if ( row < blocksize ) dy[ row ] = dot * alpha - lambda * dx[ offset+row ] + beta * dy [ row ]; else dy[ row ] = dot * alpha - lambda * dx[ addrows[row-blocksize] ] + beta * dy [ row ]; } } /** Purpose ------- This routine computes y = alpha * A^t * x + beta * y on the GPU. Input format is ELL. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha double scalar multiplier @param[in] dval magmaDouble_ptr array containing values of A in ELL @param[in] dcolind magmaIndex_ptr columnindices of A in ELL @param[in] dx magmaDouble_ptr input vector x @param[in] beta double scalar multiplier @param[out] dy magmaDouble_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_d ********************************************************************/ extern "C" magma_int_t magma_dgeelltmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, double alpha, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaDouble_ptr dx, double beta, magmaDouble_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; if (beta == MAGMA_D_ZERO) { hipLaunchKernelGGL(( dgeelltmv_kernel<true>), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); } else { hipLaunchKernelGGL(( dgeelltmv_kernel<false>), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); } return MAGMA_SUCCESS; } /** Purpose ------- This routine computes y = alpha *( A - lambda I ) * x + beta * y on the GPU. Input format is ELL. 
Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha double scalar multiplier @param[in] lambda double scalar multiplier @param[in] dval magmaDouble_ptr array containing values of A in ELL @param[in] dcolind magmaIndex_ptr columnindices of A in ELL @param[in] dx magmaDouble_ptr input vector x @param[in] beta double scalar multiplier @param[in] offset magma_int_t in case not the main diagonal is scaled @param[in] blocksize magma_int_t in case of processing multiple vectors @param[in] addrows magmaIndex_ptr in case the matrixpowerskernel is used @param[out] dy magmaDouble_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dblas ********************************************************************/ extern "C" magma_int_t magma_dgeelltmv_shift( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, double alpha, double lambda, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaDouble_ptr dx, double beta, magma_int_t offset, magma_int_t blocksize, magmaIndex_ptr addrows, magmaDouble_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; double tmp_shift; //magma_dsetvector(1,&lambda,1,&tmp_shift,1); tmp_shift = lambda; hipLaunchKernelGGL(( dgeelltmv_kernel_shift), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, nnz_per_row, alpha, tmp_shift, dval, dcolind, dx, beta, offset, blocksize, addrows, dy ); return MAGMA_SUCCESS; }
6e62bb4a87fb56f10f22f38bbb9cafc4d39f386a.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @generated from sparse/blas/zgeelltmv.cu, normal z -> d, Wed Jan 2 14:18:53 2019 */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 // ELL SpMV kernel //Michael Garland template<bool betazero> __global__ void dgeelltmv_kernel( int num_rows, int num_cols, int num_cols_per_row, double alpha, double * dval, magma_index_t * dcolind, double * dx, double beta, double * dy) { int row = blockDim.x * blockIdx.x + threadIdx.x; if (row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row; n++ ) { int col = dcolind [ num_rows * n + row ]; double val = dval [ num_rows * n + row ]; //if ( val != MAGMA_D_ZERO ) dot += val * dx[col ]; } if (betazero) { dy[ row ] = dot * alpha; } else { dy[ row ] = dot * alpha + beta * dy [ row ]; } } } // shifted ELL SpMV kernel //Michael Garland __global__ void dgeelltmv_kernel_shift( int num_rows, int num_cols, int num_cols_per_row, double alpha, double lambda, double * dval, magma_index_t * dcolind, double * dx, double beta, int offset, int blocksize, magma_index_t * addrows, double * dy) { int row = blockDim.x * blockIdx.x + threadIdx.x; if (row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row; n++ ) { int col = dcolind [ num_rows * n + row ]; double val = dval [ num_rows * n + row ]; if ( val != 0) dot += val * dx[col ]; } if ( row < blocksize ) dy[ row ] = dot * alpha - lambda * dx[ offset+row ] + beta * dy [ row ]; else dy[ row ] = dot * alpha - lambda * dx[ addrows[row-blocksize] ] + beta * dy [ row ]; } } /** Purpose ------- This routine computes y = alpha * A^t * x + beta * y on the GPU. Input format is ELL. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha double scalar multiplier @param[in] dval magmaDouble_ptr array containing values of A in ELL @param[in] dcolind magmaIndex_ptr columnindices of A in ELL @param[in] dx magmaDouble_ptr input vector x @param[in] beta double scalar multiplier @param[out] dy magmaDouble_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_d ********************************************************************/ extern "C" magma_int_t magma_dgeelltmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, double alpha, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaDouble_ptr dx, double beta, magmaDouble_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; if (beta == MAGMA_D_ZERO) { dgeelltmv_kernel<true><<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); } else { dgeelltmv_kernel<false><<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); } return MAGMA_SUCCESS; } /** Purpose ------- This routine computes y = alpha *( A - lambda I ) * x + beta * y on the GPU. Input format is ELL. 
Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha double scalar multiplier @param[in] lambda double scalar multiplier @param[in] dval magmaDouble_ptr array containing values of A in ELL @param[in] dcolind magmaIndex_ptr columnindices of A in ELL @param[in] dx magmaDouble_ptr input vector x @param[in] beta double scalar multiplier @param[in] offset magma_int_t in case not the main diagonal is scaled @param[in] blocksize magma_int_t in case of processing multiple vectors @param[in] addrows magmaIndex_ptr in case the matrixpowerskernel is used @param[out] dy magmaDouble_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dblas ********************************************************************/ extern "C" magma_int_t magma_dgeelltmv_shift( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, double alpha, double lambda, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaDouble_ptr dx, double beta, magma_int_t offset, magma_int_t blocksize, magmaIndex_ptr addrows, magmaDouble_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; double tmp_shift; //magma_dsetvector(1,&lambda,1,&tmp_shift,1); tmp_shift = lambda; dgeelltmv_kernel_shift<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, nnz_per_row, alpha, tmp_shift, dval, dcolind, dx, beta, offset, blocksize, addrows, dy ); return MAGMA_SUCCESS; }
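The kernels above index ELL storage column-major: entry n of row `row` lives at dval[num_rows * n + row], with every row padded out to num_cols_per_row entries. A CPU mirror of dgeelltmv_kernel's inner loop, illustrative only (y is assumed pre-sized to num_rows):

#include <vector>

void ell_spmv(int num_rows, int num_cols_per_row,
              const std::vector<double> &dval,
              const std::vector<int>    &dcolind,
              const std::vector<double> &x,
              double alpha, double beta, std::vector<double> &y)
{
    for (int row = 0; row < num_rows; ++row) {
        double dot = 0.0;
        for (int n = 0; n < num_cols_per_row; ++n) {
            int col = dcolind[num_rows * n + row];    // same stride as the kernel
            dot += dval[num_rows * n + row] * x[col];
        }
        y[row] = alpha * dot + beta * y[row];
    }
}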
9b89b5ec387ad69512340c2f92ef42aa8948c114.hip
// !!! This is a file automatically generated by hipify!!! #include "../common/common.h" #include <hip/hip_runtime.h> #include <stdio.h> #define DIM 128 /* * An example of using shared memory to optimize performance of a parallel * reduction by constructing partial results for a thread block in shared memory * before flushing to global memory. */ extern __shared__ int dsmem[]; // Recursive Implementation of Interleaved Pair Approach int recursiveReduce(int *data, int const size) { if (size == 1) return data[0]; int const stride = size / 2; for (int i = 0; i < stride; i++) data[i] += data[i + stride]; return recursiveReduce(data, stride); } // unroll4 + complete unroll for loop + gmem __global__ void reduceGmem(int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; int *idata = g_idata + blockIdx.x * blockDim.x; // boundary check unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= n) return; // in-place reduction in global memory if (blockDim.x >= 1024 && tid < 512) idata[tid] += idata[tid + 512]; __syncthreads(); if (blockDim.x >= 512 && tid < 256) idata[tid] += idata[tid + 256]; __syncthreads(); if (blockDim.x >= 256 && tid < 128) idata[tid] += idata[tid + 128]; __syncthreads(); if (blockDim.x >= 128 && tid < 64) idata[tid] += idata[tid + 64]; __syncthreads(); // unrolling warp if (tid < 32) { volatile int *vsmem = idata; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceSmem(int *g_idata, int *g_odata, unsigned int n) { __shared__ int smem[DIM]; // set thread ID unsigned int tid = threadIdx.x; // boundary check unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= n) return; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x; // set to smem by each threads smem[tid] = idata[tid]; __syncthreads(); // in-place reduction in shared memory if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512]; __syncthreads(); if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256]; __syncthreads(); if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128]; __syncthreads(); if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64]; __syncthreads(); // unrolling warp if (tid < 32) { volatile int *vsmem = smem; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = smem[0]; } __global__ void reduceSmemDyn(int *g_idata, int *g_odata, unsigned int n) { extern __shared__ int smem[]; // set thread ID unsigned int tid = threadIdx.x; int *idata = g_idata + blockIdx.x * blockDim.x; // set to smem by each threads smem[tid] = idata[tid]; __syncthreads(); // in-place reduction in global memory if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512]; __syncthreads(); if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256]; __syncthreads(); if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128]; __syncthreads(); if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64]; __syncthreads(); // unrolling warp if (tid < 32) { volatile int *vsmem = smem; vsmem[tid] += vsmem[tid + 
32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = smem[0]; } // unroll4 + complete unroll for loop + gmem __global__ void reduceGmemUnroll(int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 4; // unrolling 4 if (idx + 3 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4; } __syncthreads(); // in-place reduction in global memory if (blockDim.x >= 1024 && tid < 512) idata[tid] += idata[tid + 512]; __syncthreads(); if (blockDim.x >= 512 && tid < 256) idata[tid] += idata[tid + 256]; __syncthreads(); if (blockDim.x >= 256 && tid < 128) idata[tid] += idata[tid + 128]; __syncthreads(); if (blockDim.x >= 128 && tid < 64) idata[tid] += idata[tid + 64]; __syncthreads(); // unrolling warp if (tid < 32) { volatile int *vsmem = idata; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceSmemUnroll(int *g_idata, int *g_odata, unsigned int n) { // static shared memory __shared__ int smem[DIM]; // set thread ID unsigned int tid = threadIdx.x; // global index, 4 blocks of input data processed at a time unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x; // unrolling 4 blocks int tmpSum = 0; // boundary check if (idx + 4 * blockDim.x <= n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; tmpSum = a1 + a2 + a3 + a4; } smem[tid] = tmpSum; __syncthreads(); // in-place reduction in shared memory if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512]; __syncthreads(); if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256]; __syncthreads(); if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128]; __syncthreads(); if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64]; __syncthreads(); // unrolling warp if (tid < 32) { volatile int *vsmem = smem; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = smem[0]; } __global__ void reduceSmemUnrollDyn(int *g_idata, int *g_odata, unsigned int n) { extern __shared__ int smem[]; // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x; // unrolling 4 int tmpSum = 0; if (idx + 3 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; tmpSum = a1 + a2 + a3 + a4; } smem[tid] = tmpSum; __syncthreads(); // in-place reduction in global memory if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512]; __syncthreads(); if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256]; 
__syncthreads(); if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128]; __syncthreads(); if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64]; __syncthreads(); // unrolling warp if (tid < 32) { volatile int *vsmem = smem; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = smem[0]; } __global__ void reduceNeighboredGmem(int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x; // boundary check if (idx >= n) return; // in-place reduction in global memory for (int stride = 1; stride < blockDim.x; stride *= 2) { if ((tid % (2 * stride)) == 0) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceNeighboredSmem(int *g_idata, int *g_odata, unsigned int n) { __shared__ int smem[DIM]; // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x; // boundary check if (idx >= n) return; smem[tid] = idata[tid]; __syncthreads(); // in-place reduction in global memory for (int stride = 1; stride < blockDim.x; stride *= 2) { if ((tid % (2 * stride)) == 0) { smem[tid] += smem[tid + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = smem[0]; } int main(int argc, char **argv) { // set up device int dev = 0; hipDeviceProp_t deviceProp; CHECK(hipGetDeviceProperties(&deviceProp, dev)); printf("%s starting reduction at ", argv[0]); printf("device %d: %s ", dev, deviceProp.name); CHECK(hipSetDevice(dev)); bool bResult = false; // initialization int size = 1 << 24; // total number of elements to reduce printf(" with array size %d ", size); // execution configuration int blocksize = DIM; // initial block size dim3 block (blocksize, 1); dim3 grid ((size + block.x - 1) / block.x, 1); printf("grid %d block %d\n", grid.x, block.x); // allocate host memory size_t bytes = size * sizeof(int); int *h_idata = (int *) malloc(bytes); int *h_odata = (int *) malloc(grid.x * sizeof(int)); int *tmp = (int *) malloc(bytes); // initialize the array for (int i = 0; i < size; i++) h_idata[i] = (int)( rand() & 0xFF ); memcpy (tmp, h_idata, bytes); int gpu_sum = 0; // allocate device memory int *d_idata = NULL; int *d_odata = NULL; CHECK(hipMalloc((void **) &d_idata, bytes)); CHECK(hipMalloc((void **) &d_odata, grid.x * sizeof(int))); // cpu reduction int cpu_sum = recursiveReduce (tmp, size); printf("cpu reduce : %d\n", cpu_sum); // reduce gmem CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( reduceNeighboredGmem), dim3(grid.x), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("reduceNeighboredGmem: %d <<<grid %d block %d>>>\n", gpu_sum, grid.x, block.x); // reduce gmem CHECK(hipMemcpy(d_idata, h_idata, bytes, 
hipMemcpyHostToDevice)); hipLaunchKernelGGL(( reduceNeighboredSmem), dim3(grid.x), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("reduceNeighboredSmem: %d <<<grid %d block %d>>>\n", gpu_sum, grid.x, block.x); // reduce gmem CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( reduceGmem), dim3(grid.x), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("reduceGmem : %d <<<grid %d block %d>>>\n", gpu_sum, grid.x, block.x); // reduce smem CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( reduceSmem), dim3(grid.x), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("reduceSmem : %d <<<grid %d block %d>>>\n", gpu_sum, grid.x, block.x); // reduce smem CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( reduceSmemDyn), dim3(grid.x), dim3(block), blocksize*sizeof(int), 0, d_idata, d_odata, size); CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("reduceSmemDyn : %d <<<grid %d block %d>>>\n", gpu_sum, grid.x, block.x); // reduce gmem CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( reduceGmemUnroll), dim3(grid.x / 4), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipMemcpy(h_odata, d_odata, grid.x / 4 * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 4; i++) gpu_sum += h_odata[i]; printf("reduceGmemUnroll4 : %d <<<grid %d block %d>>>\n", gpu_sum, grid.x / 4, block.x); // reduce smem CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( reduceSmemUnroll), dim3(grid.x / 4), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipMemcpy(h_odata, d_odata, grid.x / 4 * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 4; i++) gpu_sum += h_odata[i]; printf("reduceSmemUnroll4 : %d <<<grid %d block %d>>>\n", gpu_sum, grid.x / 4, block.x); // reduce smem CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( reduceSmemUnrollDyn), dim3(grid.x / 4), dim3(block), DIM*sizeof(int), 0, d_idata, d_odata, size); CHECK(hipMemcpy(h_odata, d_odata, grid.x / 4 * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 4; i++) gpu_sum += h_odata[i]; printf("reduceSmemDynUnroll4: %d <<<grid %d block %d>>>\n", gpu_sum, grid.x / 4, block.x); // free host memory free(h_idata); free(h_odata); // free device memory CHECK(hipFree(d_idata)); CHECK(hipFree(d_odata)); // reset device CHECK(hipDeviceReset()); // check the results bResult = (gpu_sum == cpu_sum); if(!bResult) printf("Test failed!\n"); return EXIT_SUCCESS; }
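// Aside: the warp-unrolling tail in the kernels above depends on a
// `volatile int *vsmem` alias to keep the compiler from caching partial sums
// in registers between the implicitly warp-synchronous steps. Since CUDA 9 the
// same final per-warp reduction is more robustly written with shuffle
// intrinsics, which need no shared memory at all. A minimal CUDA sketch (not
// from the original sources), assuming a 32-lane warp with all lanes active;
// HIP offers __shfl_down without the mask argument, and AMD wavefronts are
// typically 64 lanes, so the starting offset would differ there:
__inline__ __device__ int warpReduceSum(int val)
{
    // Each iteration folds the upper half of the live lanes into the lower
    // half; after log2(32) = 5 steps, lane 0 holds the warp-wide sum.
    for (int offset = 16; offset > 0; offset >>= 1)
        val += __shfl_down_sync(0xffffffff, val, offset);
    return val;
}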
9b89b5ec387ad69512340c2f92ef42aa8948c114.cu
#include "../common/common.h" #include <cuda_runtime.h> #include <stdio.h> #define DIM 128 /* * An example of using shared memory to optimize performance of a parallel * reduction by constructing partial results for a thread block in shared memory * before flushing to global memory. */ extern __shared__ int dsmem[]; // Recursive Implementation of Interleaved Pair Approach int recursiveReduce(int *data, int const size) { if (size == 1) return data[0]; int const stride = size / 2; for (int i = 0; i < stride; i++) data[i] += data[i + stride]; return recursiveReduce(data, stride); } // unroll4 + complete unroll for loop + gmem __global__ void reduceGmem(int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; int *idata = g_idata + blockIdx.x * blockDim.x; // boundary check unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= n) return; // in-place reduction in global memory if (blockDim.x >= 1024 && tid < 512) idata[tid] += idata[tid + 512]; __syncthreads(); if (blockDim.x >= 512 && tid < 256) idata[tid] += idata[tid + 256]; __syncthreads(); if (blockDim.x >= 256 && tid < 128) idata[tid] += idata[tid + 128]; __syncthreads(); if (blockDim.x >= 128 && tid < 64) idata[tid] += idata[tid + 64]; __syncthreads(); // unrolling warp if (tid < 32) { volatile int *vsmem = idata; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceSmem(int *g_idata, int *g_odata, unsigned int n) { __shared__ int smem[DIM]; // set thread ID unsigned int tid = threadIdx.x; // boundary check unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= n) return; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x; // set to smem by each threads smem[tid] = idata[tid]; __syncthreads(); // in-place reduction in shared memory if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512]; __syncthreads(); if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256]; __syncthreads(); if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128]; __syncthreads(); if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64]; __syncthreads(); // unrolling warp if (tid < 32) { volatile int *vsmem = smem; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = smem[0]; } __global__ void reduceSmemDyn(int *g_idata, int *g_odata, unsigned int n) { extern __shared__ int smem[]; // set thread ID unsigned int tid = threadIdx.x; int *idata = g_idata + blockIdx.x * blockDim.x; // set to smem by each threads smem[tid] = idata[tid]; __syncthreads(); // in-place reduction in global memory if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512]; __syncthreads(); if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256]; __syncthreads(); if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128]; __syncthreads(); if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64]; __syncthreads(); // unrolling warp if (tid < 32) { volatile int *vsmem = smem; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 
8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = smem[0]; } // unroll4 + complete unroll for loop + gmem __global__ void reduceGmemUnroll(int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 4; // unrolling 4 if (idx + 3 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4; } __syncthreads(); // in-place reduction in global memory if (blockDim.x >= 1024 && tid < 512) idata[tid] += idata[tid + 512]; __syncthreads(); if (blockDim.x >= 512 && tid < 256) idata[tid] += idata[tid + 256]; __syncthreads(); if (blockDim.x >= 256 && tid < 128) idata[tid] += idata[tid + 128]; __syncthreads(); if (blockDim.x >= 128 && tid < 64) idata[tid] += idata[tid + 64]; __syncthreads(); // unrolling warp if (tid < 32) { volatile int *vsmem = idata; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceSmemUnroll(int *g_idata, int *g_odata, unsigned int n) { // static shared memory __shared__ int smem[DIM]; // set thread ID unsigned int tid = threadIdx.x; // global index, 4 blocks of input data processed at a time unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x; // unrolling 4 blocks int tmpSum = 0; // boundary check if (idx + 4 * blockDim.x <= n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; tmpSum = a1 + a2 + a3 + a4; } smem[tid] = tmpSum; __syncthreads(); // in-place reduction in shared memory if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512]; __syncthreads(); if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256]; __syncthreads(); if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128]; __syncthreads(); if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64]; __syncthreads(); // unrolling warp if (tid < 32) { volatile int *vsmem = smem; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = smem[0]; } __global__ void reduceSmemUnrollDyn(int *g_idata, int *g_odata, unsigned int n) { extern __shared__ int smem[]; // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x; // unrolling 4 int tmpSum = 0; if (idx + 3 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; tmpSum = a1 + a2 + a3 + a4; } smem[tid] = tmpSum; __syncthreads(); // in-place reduction in global memory if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512]; __syncthreads(); if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256]; __syncthreads(); if (blockDim.x >= 256 && tid < 128) smem[tid] += 
smem[tid + 128]; __syncthreads(); if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64]; __syncthreads(); // unrolling warp if (tid < 32) { volatile int *vsmem = smem; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = smem[0]; } __global__ void reduceNeighboredGmem(int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x; // boundary check if (idx >= n) return; // in-place reduction in global memory for (int stride = 1; stride < blockDim.x; stride *= 2) { if ((tid % (2 * stride)) == 0) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceNeighboredSmem(int *g_idata, int *g_odata, unsigned int n) { __shared__ int smem[DIM]; // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x; // boundary check if (idx >= n) return; smem[tid] = idata[tid]; __syncthreads(); // in-place reduction in global memory for (int stride = 1; stride < blockDim.x; stride *= 2) { if ((tid % (2 * stride)) == 0) { smem[tid] += smem[tid + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = smem[0]; } int main(int argc, char **argv) { // set up device int dev = 0; cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp, dev)); printf("%s starting reduction at ", argv[0]); printf("device %d: %s ", dev, deviceProp.name); CHECK(cudaSetDevice(dev)); bool bResult = false; // initialization int size = 1 << 24; // total number of elements to reduce printf(" with array size %d ", size); // execution configuration int blocksize = DIM; // initial block size dim3 block (blocksize, 1); dim3 grid ((size + block.x - 1) / block.x, 1); printf("grid %d block %d\n", grid.x, block.x); // allocate host memory size_t bytes = size * sizeof(int); int *h_idata = (int *) malloc(bytes); int *h_odata = (int *) malloc(grid.x * sizeof(int)); int *tmp = (int *) malloc(bytes); // initialize the array for (int i = 0; i < size; i++) h_idata[i] = (int)( rand() & 0xFF ); memcpy (tmp, h_idata, bytes); int gpu_sum = 0; // allocate device memory int *d_idata = NULL; int *d_odata = NULL; CHECK(cudaMalloc((void **) &d_idata, bytes)); CHECK(cudaMalloc((void **) &d_odata, grid.x * sizeof(int))); // cpu reduction int cpu_sum = recursiveReduce (tmp, size); printf("cpu reduce : %d\n", cpu_sum); // reduce gmem CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); reduceNeighboredGmem<<<grid.x, block>>>(d_idata, d_odata, size); CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("reduceNeighboredGmem: %d <<<grid %d block %d>>>\n", gpu_sum, grid.x, block.x); // reduce gmem CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); reduceNeighboredSmem<<<grid.x, block>>>(d_idata, d_odata, size); 
CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("reduceNeighboredSmem: %d <<<grid %d block %d>>>\n", gpu_sum, grid.x, block.x); // reduce gmem CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); reduceGmem<<<grid.x, block>>>(d_idata, d_odata, size); CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("reduceGmem : %d <<<grid %d block %d>>>\n", gpu_sum, grid.x, block.x); // reduce smem CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); reduceSmem<<<grid.x, block>>>(d_idata, d_odata, size); CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("reduceSmem : %d <<<grid %d block %d>>>\n", gpu_sum, grid.x, block.x); // reduce smem CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); reduceSmemDyn<<<grid.x, block, blocksize*sizeof(int)>>>(d_idata, d_odata, size); CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("reduceSmemDyn : %d <<<grid %d block %d>>>\n", gpu_sum, grid.x, block.x); // reduce gmem CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); reduceGmemUnroll<<<grid.x / 4, block>>>(d_idata, d_odata, size); CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 4 * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 4; i++) gpu_sum += h_odata[i]; printf("reduceGmemUnroll4 : %d <<<grid %d block %d>>>\n", gpu_sum, grid.x / 4, block.x); // reduce smem CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); reduceSmemUnroll<<<grid.x / 4, block>>>(d_idata, d_odata, size); CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 4 * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 4; i++) gpu_sum += h_odata[i]; printf("reduceSmemUnroll4 : %d <<<grid %d block %d>>>\n", gpu_sum, grid.x / 4, block.x); // reduce smem CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); reduceSmemUnrollDyn<<<grid.x / 4, block, DIM*sizeof(int)>>>(d_idata, d_odata, size); CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 4 * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 4; i++) gpu_sum += h_odata[i]; printf("reduceSmemDynUnroll4: %d <<<grid %d block %d>>>\n", gpu_sum, grid.x / 4, block.x); // free host memory free(h_idata); free(h_odata); // free device memory CHECK(cudaFree(d_idata)); CHECK(cudaFree(d_odata)); // reset device CHECK(cudaDeviceReset()); // check the results bResult = (gpu_sum == cpu_sum); if(!bResult) printf("Test failed!\n"); return EXIT_SUCCESS; }
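// Aside: apart from the hip*/cuda* API renames, the only systematic difference
// between the .hip file above and this .cu original is the kernel launch:
// hipify rewrites the CUDA triple-chevron syntax into the hipLaunchKernelGGL
// macro, making the dynamic-shared-memory size and stream arguments explicit
// (0 when unused). The dynamic-smem launch in this pair maps as:
//
//   CUDA: reduceSmemDyn<<<grid.x, block, blocksize*sizeof(int)>>>(d_idata, d_odata, size);
//   HIP : hipLaunchKernelGGL(reduceSmemDyn, dim3(grid.x), dim3(block),
//                            blocksize*sizeof(int), 0, d_idata, d_odata, size);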
5bd0496cb0ec98c9328936c5ca3a022e86101f64.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stddef.h> #include <stdint.h> #include "model_gpu_utils.h" #include "ten_tusscher_2004_epi_S3_6.h" extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) { print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n"); // execution configuration const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t size = num_volumes*sizeof(real); check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ)); check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t))); hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, num_volumes); check_cuda_error( hipPeekAtLastError() ); hipDeviceSynchronize(); return pitch_h; } extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) { // execution configuration const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t stim_currents_size = sizeof(real)*num_cells_to_solve; size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve; real *stims_currents_device; check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size)); check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice)); //the array cells to solve is passed when we are using and adapative mesh uint32_t *cells_to_solve_device = NULL; if(cells_to_solve != NULL) { check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size)); check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice)); } hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps); check_cuda_error( hipPeekAtLastError() ); check_cuda_error(hipFree(stims_currents_device)); if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device)); } __global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes) { // Thread ID int threadID = blockDim.x * blockIdx.x + threadIdx.x; if(threadID < num_volumes) { /* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt *((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M *((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H *((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J *((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1 *((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2 *((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs *((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S *((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R *((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D *((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F *((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa *((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G *((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai *((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR *((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai *((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real 
sv_sst[]={-86.5110986392742,0.00130591158765005,0.778304597988111,0.778190083712180,0.000176141600174844,0.484495378655116,0.00295228963782625,0.999998329695130,1.95198204949961e-08,1.90553223501749e-05,0.999768478047086,1.00656738617877,0.999980520529342,5.74063440693430e-05,0.608088033062619,9.96205488133323,139.557924801650}; for (uint32_t i = 0; i < NEQ; i++) *((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i]; } } // Solving the model for each cell in the tissue matrix ni x nj __global__ void solve_gpu(real dt, real *sv, real* stim_currents, uint32_t *cells_to_solve, uint32_t num_cells_to_solve, int num_steps) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; int sv_id; // Each thread solves one cell model if(threadID < num_cells_to_solve) { if(cells_to_solve) sv_id = cells_to_solve[threadID]; else sv_id = threadID; real rDY[NEQ]; for (int n = 0; n < num_steps; ++n) { RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt); *((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id); for(int i = 0; i < NEQ; i++) { *((real*)((char*)sv + pitch * i) + sv_id) = rDY[i]; } } } } inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) { // State variables real svolt = *((real*)((char*)sv + pitch * 0) + threadID_); real sm = *((real*)((char*)sv + pitch * 1) + threadID_); real sh = *((real*)((char*)sv + pitch * 2) + threadID_); real sj = *((real*)((char*)sv + pitch * 3) + threadID_); real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_); real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_); real sxs = *((real*)((char*)sv + pitch * 6) + threadID_); real ss = *((real*)((char*)sv + pitch * 7) + threadID_); real sr = *((real*)((char*)sv + pitch * 8) + threadID_); real sd = *((real*)((char*)sv + pitch * 9) + threadID_); real sf = *((real*)((char*)sv + pitch * 10) + threadID_); real sfca = *((real*)((char*)sv + pitch * 11) + threadID_); real sg = *((real*)((char*)sv + pitch * 12) + threadID_); real Cai = *((real*)((char*)sv + pitch * 13) + threadID_); real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_); real Nai = *((real*)((char*)sv + pitch * 15) + threadID_); real Ki = *((real*)((char*)sv + pitch * 16) + threadID_); //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; ///#ifdef EPI real Gks=0.245; ///#endif ///#ifdef ENDO /// real Gks=0.245; ///#endif ///#ifdef MCELL //real Gks=0.062; ///#endif //Parameters for Ik1 real GK1=5.405; //Parameters for Ito ///#ifdef EPI real Gto=0.294; ///#endif ///#ifdef ENDO /// real Gto=0.073; ///#endif ///#ifdef MCELL /// real Gto=0.294; ///#endif //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; // Setting Elnaz's parameters 
real parameters []={14.2952631571165,0.000223357550203231,0.000139823866607541,0.000468830572859158,0.267957668347321,0.123807265230240,0.209206424884521,4.97611368106475,0.0181339958455722,1.93368689237664,1099.98460468133,0.000558564959599142,0.298337407980113,0.0142073923928152,0.00109951928325625,6.37440120865430e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; /// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; ///Ileak=0.00008f*(CaSR-Cai); Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); 
CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; #ifdef EPI R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif #ifdef ENDO R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+28)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.; #endif #ifdef MCELL R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37) sg=gold; //update voltage rDY_[0] = svolt + 
dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
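// Aside: despite its name, rDY here carries *updated state values*, not time
// derivatives. RHS_gpu returns the membrane voltage already advanced one
// explicit-Euler step (rDY_[0] = svolt + dt*(-sItot)) and the gating variables
// advanced with the Rush-Larsen exponential integrator, so the
// `dt*rDY[0] + sv[...]` statement in solve_gpu is dead code -- the loop that
// follows immediately overwrites slot 0 with rDY[0]. The gate update applied
// to m, h, j, xr1, xr2, xs, s, r, d and f is, as a standalone sketch (helper
// name hypothetical; `real` as defined by the model headers):
__device__ __forceinline__ real rush_larsen_step(real y, real y_inf, real tau, real dt)
{
    // Exact solution of dy/dt = (y_inf - y) / tau over one step of length dt;
    // unconditionally stable no matter how stiff tau makes the gate.
    return y_inf - (y_inf - y) * exp(-dt / tau);
}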
5bd0496cb0ec98c9328936c5ca3a022e86101f64.cu
#include <stddef.h> #include <stdint.h> #include "model_gpu_utils.h" #include "ten_tusscher_2004_epi_S3_6.h" extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) { print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n"); // execution configuration const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t size = num_volumes*sizeof(real); check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ)); check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t))); kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, num_volumes); check_cuda_error( cudaPeekAtLastError() ); cudaDeviceSynchronize(); return pitch_h; } extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) { // execution configuration const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t stim_currents_size = sizeof(real)*num_cells_to_solve; size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve; real *stims_currents_device; check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size)); check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice)); //the array cells to solve is passed when we are using and adapative mesh uint32_t *cells_to_solve_device = NULL; if(cells_to_solve != NULL) { check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size)); check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice)); } solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps); check_cuda_error( cudaPeekAtLastError() ); check_cuda_error(cudaFree(stims_currents_device)); if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device)); } __global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes) { // Thread ID int threadID = blockDim.x * blockIdx.x + threadIdx.x; if(threadID < num_volumes) { /* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt *((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M *((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H *((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J *((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1 *((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2 *((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs *((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S *((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R *((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D *((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F *((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa *((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G *((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai *((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR *((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai *((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.5110986392742,0.00130591158765005,0.778304597988111,0.778190083712180,0.000176141600174844,0.484495378655116,0.00295228963782625,0.999998329695130,1.95198204949961e-08,1.90553223501749e-05,0.999768478047086,1.00656738617877,0.999980520529342,5.74063440693430e-05,0.608088033062619,9.96205488133323,139.557924801650}; for (uint32_t i = 0; i < NEQ; i++) *((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i]; } } // Solving the model for each cell in the tissue 
matrix ni x nj __global__ void solve_gpu(real dt, real *sv, real* stim_currents, uint32_t *cells_to_solve, uint32_t num_cells_to_solve, int num_steps) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; int sv_id; // Each thread solves one cell model if(threadID < num_cells_to_solve) { if(cells_to_solve) sv_id = cells_to_solve[threadID]; else sv_id = threadID; real rDY[NEQ]; for (int n = 0; n < num_steps; ++n) { RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt); *((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id); for(int i = 0; i < NEQ; i++) { *((real*)((char*)sv + pitch * i) + sv_id) = rDY[i]; } } } } inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) { // State variables real svolt = *((real*)((char*)sv + pitch * 0) + threadID_); real sm = *((real*)((char*)sv + pitch * 1) + threadID_); real sh = *((real*)((char*)sv + pitch * 2) + threadID_); real sj = *((real*)((char*)sv + pitch * 3) + threadID_); real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_); real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_); real sxs = *((real*)((char*)sv + pitch * 6) + threadID_); real ss = *((real*)((char*)sv + pitch * 7) + threadID_); real sr = *((real*)((char*)sv + pitch * 8) + threadID_); real sd = *((real*)((char*)sv + pitch * 9) + threadID_); real sf = *((real*)((char*)sv + pitch * 10) + threadID_); real sfca = *((real*)((char*)sv + pitch * 11) + threadID_); real sg = *((real*)((char*)sv + pitch * 12) + threadID_); real Cai = *((real*)((char*)sv + pitch * 13) + threadID_); real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_); real Nai = *((real*)((char*)sv + pitch * 15) + threadID_); real Ki = *((real*)((char*)sv + pitch * 16) + threadID_); //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; ///#ifdef EPI real Gks=0.245; ///#endif ///#ifdef ENDO /// real Gks=0.245; ///#endif ///#ifdef MCELL //real Gks=0.062; ///#endif //Parameters for Ik1 real GK1=5.405; //Parameters for Ito ///#ifdef EPI real Gto=0.294; ///#endif ///#ifdef ENDO /// real Gto=0.073; ///#endif ///#ifdef MCELL /// real Gto=0.294; ///#endif //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; // Setting Elnaz's parameters real parameters []={14.2952631571165,0.000223357550203231,0.000139823866607541,0.000468830572859158,0.267957668347321,0.123807265230240,0.209206424884521,4.97611368106475,0.0181339958455722,1.93368689237664,1099.98460468133,0.000558564959599142,0.298337407980113,0.0142073923928152,0.00109951928325625,6.37440120865430e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; 
GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; /// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; ///Ileak=0.00008f*(CaSR-Cai); Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; 
Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; #ifdef EPI R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif #ifdef ENDO R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+28)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.; #endif #ifdef MCELL R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
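// Aside: both versions keep the NEQ state variables of all cells in one
// pitched 2D allocation from cudaMallocPitch/hipMallocPitch: row i holds
// variable i for every cell, and consecutive rows are `pitch` bytes apart
// (pitch >= num_volumes * sizeof(real), padded for alignment), which is why
// every access goes through the `(real*)((char*)sv + pitch * i) + id` cast. A
// hypothetical helper that names the pattern:
__device__ __forceinline__ real* state_ptr(real* sv, size_t pitch_bytes, int var, int cell)
{
    // Step down `var` rows of `pitch_bytes` bytes each, then index the cell
    // within that row; the byte arithmetic must be done on a char* pointer.
    return (real*)((char*)sv + (size_t)var * pitch_bytes) + cell;
}
// usage sketch: real svolt = *state_ptr(sv, pitch, 0, threadID_);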
de634d6d88444a6a29a978a4393933687a6cea58.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "billow.cuh" #include "..\..\cpp\modules\generators\Billow.h" __device__ float billow2D_Simplex(float2 point, float freq, float lacun, float persist, int init_seed, int octaves) { float result = 0.0f; float amplitude = 1.0f; // Scale starting point by frequency. point.x = point.x * freq; point.y = point.y * freq; // Use loop for fractal octave bit for (size_t i = 0; i < octaves; ++i) { int seed = (init_seed + i) & 0xffffffff; result += fabsf(simplex2d(point.x, point.y, seed, nullptr)) * amplitude; point.x *= lacun; point.y *= lacun; amplitude *= persist; } //result /= 100.0f; return result; } __device__ float billow2D(float2 point, float freq, float lacun, float persist, int init_seed, int octaves) { // Will be incremented upon. float result = 0.0f; float amplitude = 1.0f; // Scale point by freq point.x = point.x * freq; point.y = point.y * freq; // TODO: Seeding the function is currently pointless and doesn't actually do anything. // Use loop for octav-ing for (size_t i = 0; i < octaves; ++i) { int seed = (init_seed + i) & 0xffffffff; result += fabsf(perlin2d(point.x, point.y, seed, nullptr)) * amplitude; // Modify vars for next octave. point.x *= lacun; point.y *= lacun; amplitude *= persist; } // float tmp = result / 100.0f; // * // return result; } __device__ float billow3D(float3 point, const float freq, const float lacun, const float persist, const int init_seed, const int octaves) { float result = 0.0f; float amplitude = 1.0f; point *= freq; for (short i = 0; i < octaves; ++i) { int seed = (init_seed + i) & 0xffffffff; result += fabsf(simplex3d(point.x, point.y, point.z, seed, nullptr)) * amplitude; point *= lacun; amplitude *= persist; } return result; } __global__ void Billow2DKernel(float* output, int width, int height, cnoise::noise_t noise_type, float2 origin, float freq, float lacun, float persist, int seed, int octaves) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < width && j < height) { float x, y; x = i + origin.x; y = j + origin.y; float2 p = make_float2(x, y); // Call billow function float val; switch (noise_type) { case(cnoise::noise_t::PERLIN): { val = billow2D(p, freq, lacun, persist, seed, octaves); break; } case(cnoise::noise_t::SIMPLEX): { val = billow2D_Simplex(p, freq, lacun, persist, seed, octaves); break; } } // Write val to the surface output[(j * width) + i] = val; } } __global__ void Billow3DKernel(cnoise::Point* coords, const int width, const int height, const float freq, const float lacun, const float persist, const int seed, const int octaves) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int j = blockIdx.y * blockDim.y + threadIdx.y; if (i >= width || j >= height) { return; } coords[i + (j * width)].Value = billow3D(coords[i + (j * width)].Position, freq, lacun, persist, seed, octaves); } void BillowLauncher2D(float* out, int width, int height, cnoise::noise_t noise_type, float2 origin, float freq, float lacun, float persist, int seed, int octaves) { #ifdef CUDA_KERNEL_TIMING hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); #endif // CUDA_KERNEL_TIMING dim3 threadsPerBlock(8, 8); dim3 numBlocks(width / threadsPerBlock.x, height / threadsPerBlock.y); hipLaunchKernelGGL(( Billow2DKernel), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, out, width, height, noise_type, origin, freq, lacun, persist, seed, octaves); hipError_t err = 
hipGetLastError(); cudaAssert(err); // Synchronize device err = hipDeviceSynchronize(); cudaAssert(err); #ifdef CUDA_KERNEL_TIMING hipEventRecord(stop); hipEventSynchronize(stop); float elapsed = 0.0f; hipEventElapsedTime(&elapsed, start, stop); printf("Billow Kernel execution time in ms: %f\n", elapsed); #endif // CUDA_KERNEL_TIMING // If this completes, kernel is done and "output" contains correct data. } void BillowLauncher3D(cnoise::Point* coords, const int width, const int height, const float freq, const float lacun, const float persist, const int seed, const int octaves) { #ifdef CUDA_KERNEL_TIMING hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); #endif // CUDA_KERNEL_TIMING dim3 threadsPerBlock(8, 8, 1); dim3 numBlocks(width / threadsPerBlock.x, height / threadsPerBlock.y, 1); hipLaunchKernelGGL(( Billow3DKernel), dim3(numBlocks), dim3(threadsPerBlock) , 0, 0, coords, width, height, freq, lacun, persist, seed, octaves); hipError_t err = hipGetLastError(); cudaAssert(err); // Synchronize device err = hipDeviceSynchronize(); cudaAssert(err); #ifdef CUDA_KERNEL_TIMING hipEventRecord(stop); hipEventSynchronize(stop); float elapsed = 0.0f; hipEventElapsedTime(&elapsed, start, stop); printf("Kernel execution time in ms: %f\n", elapsed); #endif // CUDA_KERNEL_TIMING }
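// Aside: float3 has no built-in arithmetic operators, so the `point *= freq`
// and `point *= lacun` lines in billow3D compile only because an overload is
// in scope somewhere in this project's headers (the CUDA samples ship an
// equivalent one in helper_math.h). A minimal sketch of the assumed overload:
__host__ __device__ __forceinline__ void operator*=(float3& a, float s)
{
    a.x *= s; a.y *= s; a.z *= s;
}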
de634d6d88444a6a29a978a4393933687a6cea58.cu
#include "billow.cuh" #include "..\..\cpp\modules\generators\Billow.h" __device__ float billow2D_Simplex(float2 point, float freq, float lacun, float persist, int init_seed, int octaves) { float result = 0.0f; float amplitude = 1.0f; // Scale starting point by frequency. point.x = point.x * freq; point.y = point.y * freq; // Use loop for fractal octave bit for (size_t i = 0; i < octaves; ++i) { int seed = (init_seed + i) & 0xffffffff; result += fabsf(simplex2d(point.x, point.y, seed, nullptr)) * amplitude; point.x *= lacun; point.y *= lacun; amplitude *= persist; } //result /= 100.0f; return result; } __device__ float billow2D(float2 point, float freq, float lacun, float persist, int init_seed, int octaves) { // Will be incremented upon. float result = 0.0f; float amplitude = 1.0f; // Scale point by freq point.x = point.x * freq; point.y = point.y * freq; // TODO: Seeding the function is currently pointless and doesn't actually do anything. // Use loop for octav-ing for (size_t i = 0; i < octaves; ++i) { int seed = (init_seed + i) & 0xffffffff; result += fabsf(perlin2d(point.x, point.y, seed, nullptr)) * amplitude; // Modify vars for next octave. point.x *= lacun; point.y *= lacun; amplitude *= persist; } // float tmp = result / 100.0f; // * // return result; } __device__ float billow3D(float3 point, const float freq, const float lacun, const float persist, const int init_seed, const int octaves) { float result = 0.0f; float amplitude = 1.0f; point *= freq; for (short i = 0; i < octaves; ++i) { int seed = (init_seed + i) & 0xffffffff; result += fabsf(simplex3d(point.x, point.y, point.z, seed, nullptr)) * amplitude; point *= lacun; amplitude *= persist; } return result; } __global__ void Billow2DKernel(float* output, int width, int height, cnoise::noise_t noise_type, float2 origin, float freq, float lacun, float persist, int seed, int octaves) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < width && j < height) { float x, y; x = i + origin.x; y = j + origin.y; float2 p = make_float2(x, y); // Call billow function float val; switch (noise_type) { case(cnoise::noise_t::PERLIN): { val = billow2D(p, freq, lacun, persist, seed, octaves); break; } case(cnoise::noise_t::SIMPLEX): { val = billow2D_Simplex(p, freq, lacun, persist, seed, octaves); break; } } // Write val to the surface output[(j * width) + i] = val; } } __global__ void Billow3DKernel(cnoise::Point* coords, const int width, const int height, const float freq, const float lacun, const float persist, const int seed, const int octaves) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int j = blockIdx.y * blockDim.y + threadIdx.y; if (i >= width || j >= height) { return; } coords[i + (j * width)].Value = billow3D(coords[i + (j * width)].Position, freq, lacun, persist, seed, octaves); } void BillowLauncher2D(float* out, int width, int height, cnoise::noise_t noise_type, float2 origin, float freq, float lacun, float persist, int seed, int octaves) { #ifdef CUDA_KERNEL_TIMING cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); #endif // CUDA_KERNEL_TIMING dim3 threadsPerBlock(8, 8); dim3 numBlocks(width / threadsPerBlock.x, height / threadsPerBlock.y); Billow2DKernel<<<numBlocks,threadsPerBlock>>>(out, width, height, noise_type, origin, freq, lacun, persist, seed, octaves); cudaError_t err = cudaGetLastError(); cudaAssert(err); // Synchronize device err = cudaDeviceSynchronize(); cudaAssert(err); #ifdef 
CUDA_KERNEL_TIMING cudaEventRecord(stop); cudaEventSynchronize(stop); float elapsed = 0.0f; cudaEventElapsedTime(&elapsed, start, stop); printf("Billow Kernel execution time in ms: %f\n", elapsed); #endif // CUDA_KERNEL_TIMING // If this completes, kernel is done and "output" contains correct data. } void BillowLauncher3D(cnoise::Point* coords, const int width, const int height, const float freq, const float lacun, const float persist, const int seed, const int octaves) { #ifdef CUDA_KERNEL_TIMING cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); #endif // CUDA_KERNEL_TIMING dim3 threadsPerBlock(8, 8, 1); dim3 numBlocks(width / threadsPerBlock.x, height / threadsPerBlock.y, 1); Billow3DKernel<<<numBlocks, threadsPerBlock >>>(coords, width, height, freq, lacun, persist, seed, octaves); cudaError_t err = cudaGetLastError(); cudaAssert(err); // Synchronize device err = cudaDeviceSynchronize(); cudaAssert(err); #ifdef CUDA_KERNEL_TIMING cudaEventRecord(stop); cudaEventSynchronize(stop); float elapsed = 0.0f; cudaEventElapsedTime(&elapsed, start, stop); printf("Kernel execution time in ms: %f\n", elapsed); #endif // CUDA_KERNEL_TIMING }
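// Aside: both launchers size the grid with truncating integer division
// (width / threadsPerBlock.x), so any width or height that is not a multiple
// of 8 silently loses its last partial tile. The kernels already bounds-check
// i and j, so rounding the grid up is safe; a sketch of the conventional fix:
static inline unsigned int ceil_div(unsigned int n, unsigned int d)
{
    return (n + d - 1) / d;  // smallest k such that k * d >= n
}
// e.g. dim3 numBlocks(ceil_div(width,  threadsPerBlock.x),
//                     ceil_div(height, threadsPerBlock.y));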
5bc2a5dfb47c82fe42a01897a3c2b76cee958b98.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuda_utils.h> #include <gtest/gtest.h> #include <test_utils.h> #include <treelite/frontend.h> #include <treelite/tree.h> #include <cmath> #include <cstdio> #include <limits> #include <memory> #include <utility> #include "cuml/fil/fil.h" #include "ml_utils.h" #include "random/rng.h" #include "test_utils.h" #define TL_CPP_CHECK(call) ASSERT(int(call) >= 0, "treelite call error") namespace ML { using namespace MLCommon; namespace tl = treelite; namespace tlf = treelite::frontend; struct FilTestParams { // input data parameters int num_rows; int num_cols; float nan_prob; // forest parameters int depth; int num_trees; float leaf_prob; // output parameters fil::output_t output; float threshold; float global_bias; // runtime parameters fil::algo_t algo; int seed; float tolerance; // treelite parameters, only used for treelite tests tl::Operator op; }; std::ostream& operator<<(std::ostream& os, const FilTestParams& ps) { os << "num_rows = " << ps.num_rows << ", num_cols = " << ps.num_cols << ", nan_prob = " << ps.nan_prob << ", depth = " << ps.depth << ", num_trees = " << ps.num_trees << ", leaf_prob = " << ps.leaf_prob << ", output = " << ps.output << ", threshold = " << ps.threshold << ", algo = " << ps.algo << ", seed = " << ps.seed << ", tolerance = " << ps.tolerance << ", op = " << tl::OpName(ps.op); return os; } __global__ void nan_kernel(float* data, const bool* mask, int len, float nan) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= len) return; if (!mask[tid]) data[tid] = nan; } float sigmoid(float x) { return 1.0f / (1.0f + expf(-x)); } class BaseFilTest : public testing::TestWithParam<FilTestParams> { protected: void SetUp() override { // setup ps = testing::TestWithParam<FilTestParams>::GetParam(); CUDA_CHECK(hipStreamCreate(&stream)); handle.setStream(stream); generate_forest(); generate_data(); predict_on_cpu(); predict_on_gpu(); } void TearDown() override { CUDA_CHECK(hipFree(preds_d)); CUDA_CHECK(hipFree(want_preds_d)); CUDA_CHECK(hipFree(data_d)); } void generate_forest() { size_t num_nodes = forest_num_nodes(); // helper data float* weights_d = nullptr; float* thresholds_d = nullptr; int* fids_d = nullptr; bool* def_lefts_d = nullptr; bool* is_leafs_d = nullptr; bool* def_lefts_h = nullptr; bool* is_leafs_h = nullptr; // allocate GPU data allocate(weights_d, num_nodes); allocate(thresholds_d, num_nodes); allocate(fids_d, num_nodes); allocate(def_lefts_d, num_nodes); allocate(is_leafs_d, num_nodes); // generate on-GPU random data Random::Rng r(ps.seed); r.uniform(weights_d, num_nodes, -1.0f, 1.0f, stream); r.uniform(thresholds_d, num_nodes, -1.0f, 1.0f, stream); r.uniformInt(fids_d, num_nodes, 0, ps.num_cols, stream); r.bernoulli(def_lefts_d, num_nodes, 0.5f, stream); r.bernoulli(is_leafs_d, num_nodes, 1.0f - ps.leaf_prob, stream); // copy data to host std::vector<float> 
weights_h(num_nodes), thresholds_h(num_nodes); std::vector<int> fids_h(num_nodes); def_lefts_h = new bool[num_nodes]; is_leafs_h = new bool[num_nodes]; updateHost(weights_h.data(), weights_d, num_nodes, stream); updateHost(thresholds_h.data(), thresholds_d, num_nodes, stream); updateHost(fids_h.data(), fids_d, num_nodes, stream); updateHost(def_lefts_h, def_lefts_d, num_nodes, stream); updateHost(is_leafs_h, is_leafs_d, num_nodes, stream); CUDA_CHECK(hipStreamSynchronize(stream)); // mark leaves for (size_t i = 0; i < ps.num_trees; ++i) { int num_tree_nodes = tree_num_nodes(); size_t leaf_start = num_tree_nodes * i + num_tree_nodes / 2; size_t leaf_end = num_tree_nodes * (i + 1); for (size_t j = leaf_start; j < leaf_end; ++j) { is_leafs_h[j] = true; } } // initialize nodes nodes.resize(num_nodes); for (size_t i = 0; i < num_nodes; ++i) { fil::dense_node_init(&nodes[i], weights_h[i], thresholds_h[i], fids_h[i], def_lefts_h[i], is_leafs_h[i]); } // clean up delete[] def_lefts_h; delete[] is_leafs_h; CUDA_CHECK(hipFree(is_leafs_d)); CUDA_CHECK(hipFree(def_lefts_d)); CUDA_CHECK(hipFree(fids_d)); CUDA_CHECK(hipFree(thresholds_d)); CUDA_CHECK(hipFree(weights_d)); } void generate_data() { // allocate arrays size_t num_data = ps.num_rows * ps.num_cols; allocate(data_d, num_data); bool* mask_d = nullptr; allocate(mask_d, num_data); // generate random data Random::Rng r(ps.seed); r.uniform(data_d, num_data, -1.0f, 1.0f, stream); r.bernoulli(mask_d, num_data, ps.nan_prob, stream); int tpb = 256; hipLaunchKernelGGL(( nan_kernel), dim3(ceildiv(int(num_data), tpb)), dim3(tpb), 0, stream, data_d, mask_d, num_data, std::numeric_limits<float>::quiet_NaN()); CUDA_CHECK(hipPeekAtLastError()); // copy to host data_h.resize(num_data); updateHost(data_h.data(), data_d, num_data, stream); CUDA_CHECK(hipStreamSynchronize(stream)); // clean up CUDA_CHECK(hipFree(mask_d)); } void predict_on_cpu() { // predict on host std::vector<float> want_preds_h(ps.num_rows); int num_nodes = tree_num_nodes(); for (int i = 0; i < ps.num_rows; ++i) { float pred = 0.0f; for (int j = 0; j < ps.num_trees; ++j) { pred += infer_one_tree(&nodes[j * num_nodes], &data_h[i * ps.num_cols]); } if ((ps.output & fil::output_t::AVG) != 0) pred = pred / ps.num_trees; pred += ps.global_bias; if ((ps.output & fil::output_t::SIGMOID) != 0) pred = sigmoid(pred); if ((ps.output & fil::output_t::THRESHOLD) != 0) { pred = pred > ps.threshold ? 1.0f : 0.0f; } want_preds_h[i] = pred; } // copy to GPU allocate(want_preds_d, ps.num_rows); updateDevice(want_preds_d, want_preds_h.data(), ps.num_rows, stream); CUDA_CHECK(hipStreamSynchronize(stream)); } virtual void init_forest(fil::forest_t* pforest) = 0; void predict_on_gpu() { fil::forest_t forest = nullptr; init_forest(&forest); // predict allocate(preds_d, ps.num_rows); fil::predict(handle, forest, preds_d, data_d, ps.num_rows); CUDA_CHECK(hipStreamSynchronize(stream)); // cleanup fil::free(handle, forest); } void compare() { ASSERT_TRUE(devArrMatch(want_preds_d, preds_d, ps.num_rows, CompareApprox<float>(ps.tolerance), stream)); } float infer_one_tree(fil::dense_node_t* root, float* data) { int curr = 0; float output = 0.0f, threshold = 0.0f; int fid = 0; bool def_left = false, is_leaf = false; for (;;) { fil::dense_node_decode(&root[curr], &output, &threshold, &fid, &def_left, &is_leaf); if (is_leaf) break; float val = data[fid]; bool cond = isnan(val) ? !def_left : val >= threshold; curr = (curr << 1) + 1 + (cond ? 
1 : 0); } return output; } int tree_num_nodes() { return (1 << (ps.depth + 1)) - 1; } int forest_num_nodes() { return tree_num_nodes() * ps.num_trees; } // predictions float* preds_d = nullptr; float* want_preds_d = nullptr; // input data float* data_d = nullptr; std::vector<float> data_h; // forest data std::vector<fil::dense_node_t> nodes; // parameters hipStream_t stream; cumlHandle handle; FilTestParams ps; }; class PredictDenseFilTest : public BaseFilTest { protected: void init_forest(fil::forest_t* pforest) override { // init FIL model fil::forest_params_t fil_ps; fil_ps.depth = ps.depth; fil_ps.num_trees = ps.num_trees; fil_ps.num_cols = ps.num_cols; fil_ps.algo = ps.algo; fil_ps.output = ps.output; fil_ps.threshold = ps.threshold; fil_ps.global_bias = ps.global_bias; fil::init_dense(handle, pforest, nodes.data(), &fil_ps); } }; class TreeliteFilTest : public BaseFilTest { protected: /** adds nodes[node] of tree starting at index root to builder at index at *pkey, increments *pkey, and returns the treelite key of the node */ int node_to_treelite(tlf::TreeBuilder* builder, int* pkey, int root, int node) { int key = (*pkey)++; TL_CPP_CHECK(builder->CreateNode(key)); int feature; float threshold, output; bool is_leaf, default_left; fil::dense_node_decode(&nodes[node], &output, &threshold, &feature, &default_left, &is_leaf); if (is_leaf) { TL_CPP_CHECK(builder->SetLeafNode(key, output)); } else { int left = root + 2 * (node - root) + 1; int right = root + 2 * (node - root) + 2; switch (ps.op) { case tl::Operator::kLT: break; case tl::Operator::kLE: // adjust the threshold threshold = std::nextafterf(threshold, -std::numeric_limits<float>::infinity()); break; case tl::Operator::kGT: // adjust the threshold; left and right still need to be swapped threshold = std::nextafterf(threshold, -std::numeric_limits<float>::infinity()); case tl::Operator::kGE: // swap left and right std::swap(left, right); default_left = !default_left; break; default: ASSERT(false, "comparison operator must be <, >, <= or >="); } int left_key = node_to_treelite(builder, pkey, root, left); int right_key = node_to_treelite(builder, pkey, root, right); TL_CPP_CHECK(builder->SetNumericalTestNode( key, feature, ps.op, threshold, default_left, left_key, right_key)); } return key; } void init_forest(fil::forest_t* pforest) override { bool random_forest_flag = (ps.output & fil::output_t::AVG) != 0; std::unique_ptr<tlf::ModelBuilder> model_builder( new tlf::ModelBuilder(ps.num_cols, 1, random_forest_flag)); // prediction transform if ((ps.output & fil::output_t::SIGMOID) != 0) { model_builder->SetModelParam("pred_transform", "sigmoid"); } // global bias char* global_bias_str = nullptr; ASSERT(asprintf(&global_bias_str, "%f", double(ps.global_bias)) > 0, "cannot convert global_bias into a string"); model_builder->SetModelParam("global_bias", global_bias_str); free(global_bias_str); // build the trees for (int i_tree = 0; i_tree < ps.num_trees; ++i_tree) { tlf::TreeBuilder* tree_builder = new tlf::TreeBuilder(); int key_counter = 0; int root = i_tree * tree_num_nodes(); int root_key = node_to_treelite(tree_builder, &key_counter, root, root); TL_CPP_CHECK(tree_builder->SetRootNode(root_key)); // InsertTree() consumes tree_builder TL_CPP_CHECK(model_builder->InsertTree(tree_builder)); } // commit the model std::unique_ptr<tl::Model> model(new tl::Model); TL_CPP_CHECK(model_builder->CommitModel(model.get())); // init FIL forest with the model fil::treelite_params_t params; params.algo = ps.algo; params.threshold = ps.threshold; 
params.output_class = (ps.output & fil::output_t::THRESHOLD) != 0; fil::from_treelite(handle, pforest, (ModelHandle)model.get(), &params); CUDA_CHECK(hipStreamSynchronize(stream)); } }; // rows, cols, nan_prob, depth, num_trees, leaf_prob, output, threshold, // global_bias, algo, seed, tolerance std::vector<FilTestParams> predict_dense_inputs = { {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::THRESHOLD), 0, 0, fil::algo_t::NAIVE, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::THRESHOLD), 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::THRESHOLD), 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 0, 0, fil::algo_t::NAIVE, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0.5, fil::algo_t::TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0.5, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 1.0, 0.5, fil::algo_t::TREE_REORG, 42, 2e-3f}, }; TEST_P(PredictDenseFilTest, Predict) { compare(); } INSTANTIATE_TEST_CASE_P(FilTests, PredictDenseFilTest, testing::ValuesIn(predict_dense_inputs)); std::vector<FilTestParams> import_inputs = { {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::THRESHOLD), 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kGT}, {20000, 50, 0.05, 8, 
50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::THRESHOLD), 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kGE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::THRESHOLD), 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::THRESHOLD), 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0.5, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0.5, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 1.0, 0.5, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kGE}, }; TEST_P(TreeliteFilTest, Import) { compare(); } INSTANTIATE_TEST_CASE_P(FilTests, TreeliteFilTest, testing::ValuesIn(import_inputs)); } // namespace ML
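As a side note, the output_t flag composition that predict_on_cpu applies above reads as a small scalar pipeline: optional averaging over trees, then the global bias, then sigmoid, then thresholding. The helper below restates it outside the test harness; it is an illustration, not cuML API.

// Standalone restatement (not cuML API) of the per-row output transform
// used by predict_on_cpu, in the order the fil::output_t flags compose.
#include <cmath>

float apply_output_transform(float raw_sum, int num_trees, bool average,
                             float global_bias, bool use_sigmoid,
                             bool use_threshold, float threshold) {
    float pred = raw_sum;                                      // sum over all trees
    if (average) pred /= num_trees;                            // fil::output_t::AVG
    pred += global_bias;                                       // global bias term
    if (use_sigmoid) pred = 1.0f / (1.0f + expf(-pred));       // fil::output_t::SIGMOID
    if (use_threshold) pred = pred > threshold ? 1.0f : 0.0f;  // fil::output_t::THRESHOLD
    return pred;
}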
5bc2a5dfb47c82fe42a01897a3c2b76cee958b98.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuda_utils.h> #include <gtest/gtest.h> #include <test_utils.h> #include <treelite/frontend.h> #include <treelite/tree.h> #include <cmath> #include <cstdio> #include <limits> #include <memory> #include <utility> #include "cuml/fil/fil.h" #include "ml_utils.h" #include "random/rng.h" #include "test_utils.h" #define TL_CPP_CHECK(call) ASSERT(int(call) >= 0, "treelite call error") namespace ML { using namespace MLCommon; namespace tl = treelite; namespace tlf = treelite::frontend; struct FilTestParams { // input data parameters int num_rows; int num_cols; float nan_prob; // forest parameters int depth; int num_trees; float leaf_prob; // output parameters fil::output_t output; float threshold; float global_bias; // runtime parameters fil::algo_t algo; int seed; float tolerance; // treelite parameters, only used for treelite tests tl::Operator op; }; std::ostream& operator<<(std::ostream& os, const FilTestParams& ps) { os << "num_rows = " << ps.num_rows << ", num_cols = " << ps.num_cols << ", nan_prob = " << ps.nan_prob << ", depth = " << ps.depth << ", num_trees = " << ps.num_trees << ", leaf_prob = " << ps.leaf_prob << ", output = " << ps.output << ", threshold = " << ps.threshold << ", algo = " << ps.algo << ", seed = " << ps.seed << ", tolerance = " << ps.tolerance << ", op = " << tl::OpName(ps.op); return os; } __global__ void nan_kernel(float* data, const bool* mask, int len, float nan) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= len) return; if (!mask[tid]) data[tid] = nan; } float sigmoid(float x) { return 1.0f / (1.0f + expf(-x)); } class BaseFilTest : public testing::TestWithParam<FilTestParams> { protected: void SetUp() override { // setup ps = testing::TestWithParam<FilTestParams>::GetParam(); CUDA_CHECK(cudaStreamCreate(&stream)); handle.setStream(stream); generate_forest(); generate_data(); predict_on_cpu(); predict_on_gpu(); } void TearDown() override { CUDA_CHECK(cudaFree(preds_d)); CUDA_CHECK(cudaFree(want_preds_d)); CUDA_CHECK(cudaFree(data_d)); } void generate_forest() { size_t num_nodes = forest_num_nodes(); // helper data float* weights_d = nullptr; float* thresholds_d = nullptr; int* fids_d = nullptr; bool* def_lefts_d = nullptr; bool* is_leafs_d = nullptr; bool* def_lefts_h = nullptr; bool* is_leafs_h = nullptr; // allocate GPU data allocate(weights_d, num_nodes); allocate(thresholds_d, num_nodes); allocate(fids_d, num_nodes); allocate(def_lefts_d, num_nodes); allocate(is_leafs_d, num_nodes); // generate on-GPU random data Random::Rng r(ps.seed); r.uniform(weights_d, num_nodes, -1.0f, 1.0f, stream); r.uniform(thresholds_d, num_nodes, -1.0f, 1.0f, stream); r.uniformInt(fids_d, num_nodes, 0, ps.num_cols, stream); r.bernoulli(def_lefts_d, num_nodes, 0.5f, stream); r.bernoulli(is_leafs_d, num_nodes, 1.0f - ps.leaf_prob, stream); // copy data to host std::vector<float> weights_h(num_nodes), thresholds_h(num_nodes); std::vector<int> fids_h(num_nodes); def_lefts_h = new 
bool[num_nodes]; is_leafs_h = new bool[num_nodes]; updateHost(weights_h.data(), weights_d, num_nodes, stream); updateHost(thresholds_h.data(), thresholds_d, num_nodes, stream); updateHost(fids_h.data(), fids_d, num_nodes, stream); updateHost(def_lefts_h, def_lefts_d, num_nodes, stream); updateHost(is_leafs_h, is_leafs_d, num_nodes, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); // mark leaves for (size_t i = 0; i < ps.num_trees; ++i) { int num_tree_nodes = tree_num_nodes(); size_t leaf_start = num_tree_nodes * i + num_tree_nodes / 2; size_t leaf_end = num_tree_nodes * (i + 1); for (size_t j = leaf_start; j < leaf_end; ++j) { is_leafs_h[j] = true; } } // initialize nodes nodes.resize(num_nodes); for (size_t i = 0; i < num_nodes; ++i) { fil::dense_node_init(&nodes[i], weights_h[i], thresholds_h[i], fids_h[i], def_lefts_h[i], is_leafs_h[i]); } // clean up delete[] def_lefts_h; delete[] is_leafs_h; CUDA_CHECK(cudaFree(is_leafs_d)); CUDA_CHECK(cudaFree(def_lefts_d)); CUDA_CHECK(cudaFree(fids_d)); CUDA_CHECK(cudaFree(thresholds_d)); CUDA_CHECK(cudaFree(weights_d)); } void generate_data() { // allocate arrays size_t num_data = ps.num_rows * ps.num_cols; allocate(data_d, num_data); bool* mask_d = nullptr; allocate(mask_d, num_data); // generate random data Random::Rng r(ps.seed); r.uniform(data_d, num_data, -1.0f, 1.0f, stream); r.bernoulli(mask_d, num_data, ps.nan_prob, stream); int tpb = 256; nan_kernel<<<ceildiv(int(num_data), tpb), tpb, 0, stream>>>( data_d, mask_d, num_data, std::numeric_limits<float>::quiet_NaN()); CUDA_CHECK(cudaPeekAtLastError()); // copy to host data_h.resize(num_data); updateHost(data_h.data(), data_d, num_data, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); // clean up CUDA_CHECK(cudaFree(mask_d)); } void predict_on_cpu() { // predict on host std::vector<float> want_preds_h(ps.num_rows); int num_nodes = tree_num_nodes(); for (int i = 0; i < ps.num_rows; ++i) { float pred = 0.0f; for (int j = 0; j < ps.num_trees; ++j) { pred += infer_one_tree(&nodes[j * num_nodes], &data_h[i * ps.num_cols]); } if ((ps.output & fil::output_t::AVG) != 0) pred = pred / ps.num_trees; pred += ps.global_bias; if ((ps.output & fil::output_t::SIGMOID) != 0) pred = sigmoid(pred); if ((ps.output & fil::output_t::THRESHOLD) != 0) { pred = pred > ps.threshold ? 1.0f : 0.0f; } want_preds_h[i] = pred; } // copy to GPU allocate(want_preds_d, ps.num_rows); updateDevice(want_preds_d, want_preds_h.data(), ps.num_rows, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); } virtual void init_forest(fil::forest_t* pforest) = 0; void predict_on_gpu() { fil::forest_t forest = nullptr; init_forest(&forest); // predict allocate(preds_d, ps.num_rows); fil::predict(handle, forest, preds_d, data_d, ps.num_rows); CUDA_CHECK(cudaStreamSynchronize(stream)); // cleanup fil::free(handle, forest); } void compare() { ASSERT_TRUE(devArrMatch(want_preds_d, preds_d, ps.num_rows, CompareApprox<float>(ps.tolerance), stream)); } float infer_one_tree(fil::dense_node_t* root, float* data) { int curr = 0; float output = 0.0f, threshold = 0.0f; int fid = 0; bool def_left = false, is_leaf = false; for (;;) { fil::dense_node_decode(&root[curr], &output, &threshold, &fid, &def_left, &is_leaf); if (is_leaf) break; float val = data[fid]; bool cond = isnan(val) ? !def_left : val >= threshold; curr = (curr << 1) + 1 + (cond ? 
1 : 0); } return output; } int tree_num_nodes() { return (1 << (ps.depth + 1)) - 1; } int forest_num_nodes() { return tree_num_nodes() * ps.num_trees; } // predictions float* preds_d = nullptr; float* want_preds_d = nullptr; // input data float* data_d = nullptr; std::vector<float> data_h; // forest data std::vector<fil::dense_node_t> nodes; // parameters cudaStream_t stream; cumlHandle handle; FilTestParams ps; }; class PredictDenseFilTest : public BaseFilTest { protected: void init_forest(fil::forest_t* pforest) override { // init FIL model fil::forest_params_t fil_ps; fil_ps.depth = ps.depth; fil_ps.num_trees = ps.num_trees; fil_ps.num_cols = ps.num_cols; fil_ps.algo = ps.algo; fil_ps.output = ps.output; fil_ps.threshold = ps.threshold; fil_ps.global_bias = ps.global_bias; fil::init_dense(handle, pforest, nodes.data(), &fil_ps); } }; class TreeliteFilTest : public BaseFilTest { protected: /** adds nodes[node] of tree starting at index root to builder at index at *pkey, increments *pkey, and returns the treelite key of the node */ int node_to_treelite(tlf::TreeBuilder* builder, int* pkey, int root, int node) { int key = (*pkey)++; TL_CPP_CHECK(builder->CreateNode(key)); int feature; float threshold, output; bool is_leaf, default_left; fil::dense_node_decode(&nodes[node], &output, &threshold, &feature, &default_left, &is_leaf); if (is_leaf) { TL_CPP_CHECK(builder->SetLeafNode(key, output)); } else { int left = root + 2 * (node - root) + 1; int right = root + 2 * (node - root) + 2; switch (ps.op) { case tl::Operator::kLT: break; case tl::Operator::kLE: // adjust the threshold threshold = std::nextafterf(threshold, -std::numeric_limits<float>::infinity()); break; case tl::Operator::kGT: // adjust the threshold; left and right still need to be swapped threshold = std::nextafterf(threshold, -std::numeric_limits<float>::infinity()); case tl::Operator::kGE: // swap left and right std::swap(left, right); default_left = !default_left; break; default: ASSERT(false, "comparison operator must be <, >, <= or >="); } int left_key = node_to_treelite(builder, pkey, root, left); int right_key = node_to_treelite(builder, pkey, root, right); TL_CPP_CHECK(builder->SetNumericalTestNode( key, feature, ps.op, threshold, default_left, left_key, right_key)); } return key; } void init_forest(fil::forest_t* pforest) override { bool random_forest_flag = (ps.output & fil::output_t::AVG) != 0; std::unique_ptr<tlf::ModelBuilder> model_builder( new tlf::ModelBuilder(ps.num_cols, 1, random_forest_flag)); // prediction transform if ((ps.output & fil::output_t::SIGMOID) != 0) { model_builder->SetModelParam("pred_transform", "sigmoid"); } // global bias char* global_bias_str = nullptr; ASSERT(asprintf(&global_bias_str, "%f", double(ps.global_bias)) > 0, "cannot convert global_bias into a string"); model_builder->SetModelParam("global_bias", global_bias_str); free(global_bias_str); // build the trees for (int i_tree = 0; i_tree < ps.num_trees; ++i_tree) { tlf::TreeBuilder* tree_builder = new tlf::TreeBuilder(); int key_counter = 0; int root = i_tree * tree_num_nodes(); int root_key = node_to_treelite(tree_builder, &key_counter, root, root); TL_CPP_CHECK(tree_builder->SetRootNode(root_key)); // InsertTree() consumes tree_builder TL_CPP_CHECK(model_builder->InsertTree(tree_builder)); } // commit the model std::unique_ptr<tl::Model> model(new tl::Model); TL_CPP_CHECK(model_builder->CommitModel(model.get())); // init FIL forest with the model fil::treelite_params_t params; params.algo = ps.algo; params.threshold = ps.threshold; 
params.output_class = (ps.output & fil::output_t::THRESHOLD) != 0; fil::from_treelite(handle, pforest, (ModelHandle)model.get(), &params); CUDA_CHECK(cudaStreamSynchronize(stream)); } }; // rows, cols, nan_prob, depth, num_trees, leaf_prob, output, threshold, // global_bias, algo, seed, tolerance std::vector<FilTestParams> predict_dense_inputs = { {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::THRESHOLD), 0, 0, fil::algo_t::NAIVE, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::THRESHOLD), 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::THRESHOLD), 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 0, 0, fil::algo_t::NAIVE, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0.5, fil::algo_t::TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0.5, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 1.0, 0.5, fil::algo_t::TREE_REORG, 42, 2e-3f}, }; TEST_P(PredictDenseFilTest, Predict) { compare(); } INSTANTIATE_TEST_CASE_P(FilTests, PredictDenseFilTest, testing::ValuesIn(predict_dense_inputs)); std::vector<FilTestParams> import_inputs = { {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::THRESHOLD), 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 0, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kGT}, {20000, 50, 0.05, 8, 
50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::THRESHOLD), 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kGE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 0, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::THRESHOLD), 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::THRESHOLD), 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 0, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0.5, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0.5, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0.5, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 1.0, 0.5, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kGE}, }; TEST_P(TreeliteFilTest, Import) { compare(); } INSTANTIATE_TEST_CASE_P(FilTests, TreeliteFilTest, testing::ValuesIn(import_inputs)); } // namespace ML
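One detail both copies of this test rely on: each tree is a complete binary tree stored contiguously in one flat array, so node_to_treelite and infer_one_tree navigate by index arithmetic alone. A standalone restatement, purely illustrative and not part of the test:

// Illustration (not part of the test): child lookups are arithmetic on the
// node's offset from its tree's root within the flat node array.
#include <cassert>

int left_child(int root, int node)  { return root + 2 * (node - root) + 1; }
int right_child(int root, int node) { return root + 2 * (node - root) + 2; }

int main() {
    // For a tree rooted at index 0 this is the usual 2*i+1 / 2*i+2 heap
    // layout, which infer_one_tree expresses as (curr << 1) + 1 + cond.
    assert(left_child(0, 0) == 1 && right_child(0, 0) == 2);
    // For the second of several depth-1 trees (3 nodes each), root == 3.
    assert(left_child(3, 3) == 4 && right_child(3, 3) == 5);
    return 0;
}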
0ffdfed1c78491ee570066149bba36c40a3e7a28.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * BSD 2-Clause License * * Copyright (c) 2020, Alessandro Capotondi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * @file exercise3.cu * @author Alessandro Capotondi * @date 27 Mar 2020 * @brief Exercise 3 - CUDA MATMUL * * @see https://dolly.fim.unimore.it/2019/course/view.php?id=152 */ #include <assert.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #define gpuErrchk(ans) \ { \ gpuAssert((ans), __FILE__, __LINE__); \ } static inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } extern "C" { #include "utils.h" } #define TWO02 (1 << 2) #define TWO04 (1 << 4) #define TWO08 (1 << 8) #ifndef N #define N (1 << 10) #endif #ifndef TILE_W #define TILE_W 4 #endif #ifndef BLOCK_SIZE #define BLOCK_SIZE 32 #endif void gemm(float * __restrict__ a, float * __restrict__ b, float * __restrict__ c, int n) { #pragma omp parallel for collapse(2) for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { float sum = 0.0; for (int k = 0; k < n; ++k) { sum += a[i * n + k] * b[k *n + j]; } c[i * n + j] = sum; } } } /** * @brief EX 3 - Complete Matrix Multiplication */ __global__ void gemm_kernel(float *__restrict__ a, float *__restrict__ b, float *__restrict__ c, int n) { //TODO: Add GEMM kernel body } int main(int argc, char *argv[]) { int n = N, iret = 0; float *a, *b, *c, *g; struct timespec rt[2]; double wt; // walltime if (argc > 1) n = atoi(argv[1]); if (NULL == (a = (float *)malloc(sizeof(*a) * n * n))) { printf("error: memory allocation for 'x'\n"); iret = -1; } if (NULL == (b = (float *)malloc(sizeof(*b) * n * n))) { printf("error: memory allocation for 'y'\n"); iret = -1; } if (NULL == (c = (float *)malloc(sizeof(*c) * n * n))) { printf("error: memory allocation for 'z'\n"); iret = -1; } if (NULL == (g = (float *)malloc(sizeof(*g) * n * n))) { printf("error: memory allocation for 'z'\n"); iret = -1; } if (0 != iret) { free(a); free(b); free(c); free(g); exit(EXIT_FAILURE); } //Init Data int _b = rand() % TWO04; int _c = rand() % TWO08; #pragma omp parallel for for (int i = 0; i < n * n; i++) { a[i] = _b 
/ (float)TWO02; b[i] = _c / (float)TWO04; c[i] = g[i] = 0.0; } clock_gettime(CLOCK_REALTIME, rt + 0); gemm(a, b, g, n); clock_gettime(CLOCK_REALTIME, rt + 1); wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec); printf("GEMM (Host) : %9.3f sec %9.1f GFLOPS\n", wt, 2.0 * n * n * n / (1.0e9 * wt)); //CUDA Buffer Allocation float *d_a, *d_b, *d_c; //TODO: Add HERE Cuda data allocation clock_gettime(CLOCK_REALTIME, rt + 0); //TODO: Add HERE Cuda data transfer //TODO: Add HERE dimBlock //TODO: Add HERE dimGrid //TODO: Add HERE kernel launch gpuErrchk(hipPeekAtLastError()); //TODO: Add HERE Cuda data transfer clock_gettime(CLOCK_REALTIME, rt + 1); wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec); printf("GEMM (GPU): %9.3f sec %9.1f GFLOPS\n", wt, 2.0 * n * n * n / (1.0e9 * wt)); for (int i = 0; i < n * n; i++) { iret = *(int *)(g + i) ^ *(int *)(c + i); assert(iret == 0); } free(a); free(b); free(c); free(g); gpuErrchk(hipFree(d_a)); gpuErrchk(hipFree(d_b)); gpuErrchk(hipFree(d_c)); return 0; }
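The kernel body and the host-side steps above are intentionally left as TODOs for the exercise. The sketch below is one possible completion under the file's own definitions (gpuErrchk, BLOCK_SIZE); the name gemm_kernel_naive is made up here to avoid clashing with the stub, and this is not the official solution.

// Hypothetical completion of the exercise: a naive, untiled GEMM.
__global__ void gemm_kernel_naive(float *__restrict__ a, float *__restrict__ b,
                                  float *__restrict__ c, int n)
{
    int i = blockIdx.y * blockDim.y + threadIdx.y; // output row
    int j = blockIdx.x * blockDim.x + threadIdx.x; // output column
    if (i < n && j < n)
    {
        float sum = 0.0f;
        for (int k = 0; k < n; ++k)
            sum += a[i * n + k] * b[k * n + j];
        c[i * n + j] = sum;
    }
}

// Matching host-side allocation, transfer, launch, and readback.
void gemm_gpu_naive(float *a, float *b, float *c, int n)
{
    float *d_a, *d_b, *d_c;
    gpuErrchk(hipMalloc((void **)&d_a, sizeof(float) * n * n));
    gpuErrchk(hipMalloc((void **)&d_b, sizeof(float) * n * n));
    gpuErrchk(hipMalloc((void **)&d_c, sizeof(float) * n * n));
    gpuErrchk(hipMemcpy(d_a, a, sizeof(float) * n * n, hipMemcpyHostToDevice));
    gpuErrchk(hipMemcpy(d_b, b, sizeof(float) * n * n, hipMemcpyHostToDevice));
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((n + BLOCK_SIZE - 1) / BLOCK_SIZE, (n + BLOCK_SIZE - 1) / BLOCK_SIZE);
    hipLaunchKernelGGL(gemm_kernel_naive, dimGrid, dimBlock, 0, 0, d_a, d_b, d_c, n);
    gpuErrchk(hipPeekAtLastError());
    gpuErrchk(hipMemcpy(c, d_c, sizeof(float) * n * n, hipMemcpyDeviceToHost));
    gpuErrchk(hipFree(d_a));
    gpuErrchk(hipFree(d_b));
    gpuErrchk(hipFree(d_c));
}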
0ffdfed1c78491ee570066149bba36c40a3e7a28.cu
/* * BSD 2-Clause License * * Copyright (c) 2020, Alessandro Capotondi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * @file exercise3.cu * @author Alessandro Capotondi * @date 27 Mar 2020 * @brief Exercise 3 - CUDA MATMUL * * @see https://dolly.fim.unimore.it/2019/course/view.php?id=152 */ #include <assert.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #define gpuErrchk(ans) \ { \ gpuAssert((ans), __FILE__, __LINE__); \ } static inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } extern "C" { #include "utils.h" } #define TWO02 (1 << 2) #define TWO04 (1 << 4) #define TWO08 (1 << 8) #ifndef N #define N (1 << 10) #endif #ifndef TILE_W #define TILE_W 4 #endif #ifndef BLOCK_SIZE #define BLOCK_SIZE 32 #endif void gemm(float * __restrict__ a, float * __restrict__ b, float * __restrict__ c, int n) { #pragma omp parallel for collapse(2) for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { float sum = 0.0; for (int k = 0; k < n; ++k) { sum += a[i * n + k] * b[k *n + j]; } c[i * n + j] = sum; } } } /** * @brief EX 3 - Complete Matrix Multiplication */ __global__ void gemm_kernel(float *__restrict__ a, float *__restrict__ b, float *__restrict__ c, int n) { //TODO: Add GEMM kernel body } int main(int argc, char *argv[]) { int n = N, iret = 0; float *a, *b, *c, *g; struct timespec rt[2]; double wt; // walltime if (argc > 1) n = atoi(argv[1]); if (NULL == (a = (float *)malloc(sizeof(*a) * n * n))) { printf("error: memory allocation for 'x'\n"); iret = -1; } if (NULL == (b = (float *)malloc(sizeof(*b) * n * n))) { printf("error: memory allocation for 'y'\n"); iret = -1; } if (NULL == (c = (float *)malloc(sizeof(*c) * n * n))) { printf("error: memory allocation for 'z'\n"); iret = -1; } if (NULL == (g = (float *)malloc(sizeof(*g) * n * n))) { printf("error: memory allocation for 'z'\n"); iret = -1; } if (0 != iret) { free(a); free(b); free(c); free(g); exit(EXIT_FAILURE); } //Init Data int _b = rand() % TWO04; int _c = rand() % TWO08; #pragma omp parallel for for (int i = 0; i < n * n; i++) { a[i] = _b / (float)TWO02; b[i] = _c / (float)TWO04; c[i] = g[i] = 0.0; } 
clock_gettime(CLOCK_REALTIME, rt + 0); gemm(a, b, g, n); clock_gettime(CLOCK_REALTIME, rt + 1); wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec); printf("GEMM (Host) : %9.3f sec %9.1f GFLOPS\n", wt, 2.0 * n * n * n / (1.0e9 * wt)); //CUDA Buffer Allocation float *d_a, *d_b, *d_c; //TODO: Add HERE Cuda data allocation clock_gettime(CLOCK_REALTIME, rt + 0); //TODO: Add HERE Cuda data transfer //TODO: Add HERE dimBlock //TODO: Add HERE dimGrid //TODO: Add HERE kernel launch gpuErrchk(cudaPeekAtLastError()); //TODO: Add HERE Cuda data transfer clock_gettime(CLOCK_REALTIME, rt + 1); wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec); printf("GEMM (GPU): %9.3f sec %9.1f GFLOPS\n", wt, 2.0 * n * n * n / (1.0e9 * wt)); for (int i = 0; i < n * n; i++) { iret = *(int *)(g + i) ^ *(int *)(c + i); assert(iret == 0); } free(a); free(b); free(c); free(g); gpuErrchk(cudaFree(d_a)); gpuErrchk(cudaFree(d_b)); gpuErrchk(cudaFree(d_c)); return 0; }
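The CUDA original leaves the same TODOs. Under the same assumptions as the HIP sketch earlier in this document, the host side would use the cuda* API and a triple-chevron launch; this presumes gemm_kernel has been filled in (e.g. with the naive row/column loop shown above) and is a sketch, not the official solution.

// Hypothetical host-side completion for the CUDA version of the exercise.
void gemm_gpu(float *a, float *b, float *c, int n)
{
    float *d_a, *d_b, *d_c;
    gpuErrchk(cudaMalloc((void **)&d_a, sizeof(float) * n * n));
    gpuErrchk(cudaMalloc((void **)&d_b, sizeof(float) * n * n));
    gpuErrchk(cudaMalloc((void **)&d_c, sizeof(float) * n * n));
    gpuErrchk(cudaMemcpy(d_a, a, sizeof(float) * n * n, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_b, b, sizeof(float) * n * n, cudaMemcpyHostToDevice));
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((n + BLOCK_SIZE - 1) / BLOCK_SIZE, (n + BLOCK_SIZE - 1) / BLOCK_SIZE);
    gemm_kernel<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, n);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaMemcpy(c, d_c, sizeof(float) * n * n, cudaMemcpyDeviceToHost));
    gpuErrchk(cudaFree(d_a));
    gpuErrchk(cudaFree(d_b));
    gpuErrchk(cudaFree(d_c));
}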
d93a6f19c58363c0e024f127f6dd9ef22f41ebcc.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <random>
#include <iostream>
#include <cmath>
#include <vector>
#include <math.h>
#include <stdio.h>
#include <rocblas.h>
#include <hiprand/hiprand.h>

#define recur_batch_size 2
#define TRAINING (false)

#ifndef PERFOPTS
#define PERFOPTS (31)
#endif

int option = 0;

#define GROUP_GEMM ((PERFOPTS & 1))
#define USE_STREAMS true
#define FUSE_PW ((PERFOPTS & 4))
#define PRE_TRANSPOSE ((PERFOPTS & 8))
#define RECUR_BATCH_SIZE (((PERFOPTS & 16) ? 2 : 1))

// Device functions
__forceinline__ __device__ float sigmoidf(float in) {
    return 1.f / (1.f + expf(-in));
}

__global__ void elementWise_fp(int hiddenSize, int miniBatch,
                               float *tmp_h, float *tmp_i, float *bias,
                               float *linearGates, float *h_out, float *i_out,
                               float *c_in, float *c_out, bool training) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int numElements = miniBatch * hiddenSize;
    if (index >= numElements) return;
    int batch = index / hiddenSize;
    int gateIndex = (index % hiddenSize) + 4 * batch * hiddenSize;
    float g[4];
    for (int i = 0; i < 4; i++) {
        g[i] = tmp_i[i * hiddenSize + gateIndex] + tmp_h[i * hiddenSize + gateIndex];
        g[i] += bias[i * hiddenSize + index % hiddenSize] + bias[(i + 4) * hiddenSize + index % hiddenSize];
        if (training) linearGates[gateIndex + i * hiddenSize] = g[i];
    }
    float in_gate = sigmoidf(g[0]);
    float forget_gate = sigmoidf(g[1]);
    float in_gate2 = tanhf(g[2]);
    float out_gate = sigmoidf(g[3]);
    float val = (forget_gate * c_in[index]) + (in_gate * in_gate2);
    c_out[index] = val;
    val = out_gate * tanhf(val);
    h_out[index] = val;
    i_out[index] = val;
}

// define the error information
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(hipError_t stat, const char *file, int line) {
    if (stat != hipSuccess) {
        fprintf(stderr, "CUDA Error: %s %s %d\n", hipGetErrorString(stat), file, line);
    }
}

#define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); }
void cublasErrCheck_(hipblasStatus_t stat, const char *file, int line) {
    if (stat != HIPBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line);
    }
}

#define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); }
void curandErrCheck_(hiprandStatus_t stat, const char *file, int line) {
    if (stat != HIPRAND_STATUS_SUCCESS) {
        fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line);
    }
}

float one = 1.f;
float zero = 0.f;

class LSTMNetwork {
public:
    hipblasHandle_t handle;
    // arguments about the network
    int num_layers;
    int mem_cell_num;
    int input_dim;
    int mini_batch;
    int seq_length;
    int num_elements;
    float *input_data;
    float *output_data;
    float *cell_state;
    // workspace for the result of R * h and W * x
    float *temp_output;
    float *temp_input;
    // used for training
    float *activations;
    // stream and event
    hipStream_t *stream_i;
    hipStream_t *stream_h;
    hipEvent_t **events_i;
    hipEvent_t **events_h;
    bool training;
    // W and R
    // W is for the input
    // R is for the h_prev
    // TODO: is it necessary to have another work space for weight
    float *weight_in;
    float *weight_out;
    float *bias;

    LSTMNetwork(int num_layers, int mem_cell_num, int input_dim, int mini_batch, int seq_length, float *input);
    ~LSTMNetwork();
    float feedforward(bool training);
    void backprop();
    void transpose_weight();
};

LSTMNetwork::LSTMNetwork(int num_layers, int mem_cell_num, int input_dim, int mini_batch, int seq_length, float *input) {
    /*this->mini_batch = 64;
    bool checkF = true;
    this->num_layers = 4;
    this->seq_length = 100;
    this->mem_cell_num = 512;
    this->input_dim = 512;*/
    this->num_layers = num_layers;
    this->mem_cell_num = mem_cell_num;
    this->input_dim = input_dim;
    this->mini_batch = mini_batch;
    this->seq_length = seq_length;
    // initialize the handle
    cublasErrCheck(hipblasCreate(&this->handle));
    this->num_elements = input_dim * mini_batch;
    // initialize stream and event
    stream_i = (hipStream_t*)malloc(num_layers * sizeof(hipStream_t));
    stream_h = (hipStream_t*)malloc(num_layers * sizeof(hipStream_t));
    events_i = (hipEvent_t**)malloc(num_layers * sizeof(hipEvent_t*));
    events_h = (hipEvent_t**)malloc(num_layers * sizeof(hipEvent_t*));
    for (int i = 0; i < num_layers; i++) {
        events_i[i] = (hipEvent_t*)malloc(seq_length * sizeof(hipEvent_t));
        events_h[i] = (hipEvent_t*)malloc(seq_length * sizeof(hipEvent_t));
    }
    // initialize stream
    for (int i = 0; i < num_layers; i++) {
        if (USE_STREAMS) {
            cudaErrCheck(hipStreamCreate(&stream_i[i]));
            // Priority is empirical.
            cudaErrCheck(hipStreamCreateWithPriority(&stream_h[i], 0, -1));
        }
        else {
            stream_i[i] = NULL;
            stream_h[i] = NULL;
        }
    }
    cudaErrCheck(hipMalloc((void**)&output_data, (seq_length + 1) * (num_layers) * num_elements * sizeof(float)));
    cudaErrCheck(hipMalloc((void**)&input_data, (seq_length) * (num_layers + 1) * num_elements * sizeof(float)));
    cudaErrCheck(hipMalloc((void**)&cell_state, (seq_length + 1) * (num_layers) * num_elements * sizeof(float)));
    cudaErrCheck(hipMalloc((void**)&weight_in, num_layers * mem_cell_num * input_dim * 8 * sizeof(float)));
    cudaErrCheck(hipMalloc((void**)&weight_out, num_layers * mem_cell_num * input_dim * 8 * sizeof(float)));
    cudaErrCheck(hipMalloc((void**)&temp_output, 4 * num_layers * num_elements * sizeof(float)));
    cudaErrCheck(hipMalloc((void**)&temp_input, 4 * seq_length * num_elements * sizeof(float)));
    cudaErrCheck(hipMalloc((void**)&bias, num_layers * mem_cell_num * 8 * sizeof(float)));
    // TODO: copy input into the input_data
    // TODO: randomize the first column of each layer for cell_data and output_data
    cudaErrCheck(hipMalloc((void**)&activations, 4 * seq_length * num_layers * num_elements * sizeof(float)));
    // randomize the weight
    hiprandGenerator_t rng;
    curandErrCheck(hiprandCreateGenerator(&rng, HIPRAND_RNG_PSEUDO_DEFAULT));
    curandErrCheck(hiprandSetPseudoRandomGeneratorSeed(rng, 1337ull));
    curandErrCheck(hiprandGenerateUniform(rng, this->weight_in, num_layers * mem_cell_num * input_dim * 8));
    curandErrCheck(hiprandGenerateUniform(rng, this->output_data, (seq_length + 1) * (num_layers) * num_elements));
    curandErrCheck(hiprandGenerateUniform(rng, this->cell_state, (seq_length + 1) * (num_layers) * num_elements));
    curandErrCheck(hiprandGenerateUniform(rng, this->input_data, (seq_length) * (num_layers + 1) * num_elements));
    curandErrCheck(hiprandGenerateUniform(rng, bias, num_layers * mem_cell_num * 8));
    curandErrCheck(hiprandDestroyGenerator(rng)));
    // TODO: do we need this?
    cudaErrCheck(hipDeviceSynchronize());
}

LSTMNetwork::~LSTMNetwork() {
    cublasErrCheck(hipblasDestroy(this->handle));
    cudaErrCheck(hipFree(output_data));
    cudaErrCheck(hipFree(input_data));
    cudaErrCheck(hipFree(cell_state));
    cudaErrCheck(hipFree(this->weight_in));
    cudaErrCheck(hipFree(this->weight_out));
    cudaErrCheck(hipFree(bias));
    cudaErrCheck(hipFree(activations));
}

// optimization 4: PRE-TRANSPOSING THE WEIGHT MATRIX
void LSTMNetwork::transpose_weight() {
    for (int i = 0; i < this->num_layers; i++) {
        float *W_in_pointer = this->weight_in + i * this->mem_cell_num * this->input_dim * 8;
        float *W_out_pointer = this->weight_out + i * this->mem_cell_num * this->input_dim * 8;
        float *R_in_pointer = this->weight_in + i * this->mem_cell_num * this->input_dim * 8 + this->mem_cell_num * this->input_dim * 4;
        float *R_out_pointer = this->weight_out + i * this->mem_cell_num * this->input_dim * 8 + this->mem_cell_num * this->input_dim * 4;
        // transpose 4 * W for one layer
        cublasErrCheck(hipblasSetStream(handle, stream_i[i]));
        cublasErrCheck(hipblasSgeam(this->handle, HIPBLAS_OP_T, HIPBLAS_OP_N,
                                    4 * this->mem_cell_num, this->input_dim,
                                    &one, W_in_pointer, this->mem_cell_num,
                                    &zero, NULL, 4 * this->mem_cell_num,
                                    W_out_pointer, 4 * this->mem_cell_num));
        // transpose 4 * R for one layer
        cublasErrCheck(hipblasSetStream(handle, stream_h[i]));
        cublasErrCheck(hipblasSgeam(this->handle, HIPBLAS_OP_T, HIPBLAS_OP_N,
                                    4 * this->mem_cell_num, this->input_dim,
                                    &one, R_in_pointer, this->mem_cell_num,
                                    &zero, NULL, 4 * this->mem_cell_num,
                                    R_out_pointer, 4 * this->mem_cell_num));
        //cublasErrCheck(hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, 4 * hiddenSize, hiddenSize, &alpha, T_i_in, hiddenSize, &beta, NULL, 4 * hiddenSize, T_i_out, 4 * hiddenSize));
    }
}

// helper function for forward
__global__ void element_wise_operation(int mem_cell_num, int mini_batch,
                                       float *temp_input, float *temp_output, float *bias,
                                       float *output_data, float *input_data,
                                       float *cell_prev, float *cell_curr,
                                       bool training, float *activation) {
    // h_data + (i + 1) * numElements + layer * (seqLength + 1) * numElements,
    // i_data + i * numElements + (layer + 1) * seqLength * numElements,
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int num_element = mem_cell_num * mini_batch;
    if (index >= num_element) {
        return;
    }
    int column_index = index / mem_cell_num;
    int row_index = index % mem_cell_num;
    float result[4];
    // TODO: check the error of this part
    for (int i = 0; i < 4; i++) {
        int real_row_index = row_index + i * mem_cell_num;
        int index_for_temp = column_index * 4 * mem_cell_num + real_row_index;
        result[i] = *(temp_input + index_for_temp) + *(temp_output + index_for_temp)
                  + *(bias + real_row_index) + *(bias + (i + 4) * mem_cell_num + row_index);
    }
    // sequence: i, f, o, ct, other matrix should be stacked in this order
    result[0] = sigmoidf(result[0]);
    result[1] = sigmoidf(result[1]);
    result[2] = sigmoidf(result[2]);
    result[3] = tanhf(result[3]);
    if (training) {
        for (int i = 0; i < 4; i++) {
            activation[column_index * 4 * mem_cell_num + i * mem_cell_num + row_index] = result[i];
        }
    }
    int data_offset = column_index * mem_cell_num + row_index;
    float ct_prev = *(cell_prev + data_offset);
    float ct = result[1] * ct_prev + result[0] * result[3];
    float ht = result[2] * tanhf(ct);
    // store ct and ht
    *(cell_curr + data_offset) = ct;
    *(output_data + data_offset) = ht;
    *(input_data + data_offset) = ht;
}

// perform the feedforward for entire network
float LSTMNetwork::feedforward(bool training) {
    float elapsedTime;
    option = 2;
    if (option == 2) {
        bool checkF = true;
        // Timing starts here
        hipEvent_t start, stop;
        cudaErrCheck(hipEventCreate(&start));
        cudaErrCheck(hipEventCreate(&stop));
        cudaErrCheck(hipEventRecord(start));
        float alpha = 1.f;
        float beta = 0.f;
        transpose_weight();
        int row_start = 0;
        int column_start = 0;
        //printf("%d,%d, \n", row_start, column_start);
        //printf("here 1 \n");
        int count = 0;
        while (row_start < this->num_layers) {
            int i = row_start;
            int j = column_start;
            //printf("%d,%d,%d,%d, \n", i, j, row_start, column_start);
            //printf("here 2 \n");
            while (j >= 0 && i < this->num_layers) {
                int j_end = j + recur_batch_size;
                if (j_end > this->seq_length) j_end = this->seq_length;
                //printf("%d,%d,%d,%d, \n", i, j, row_start, column_start);
                //printf("== here 3 \n");
                cublasErrCheck(hipblasSetStream(handle, stream_i[i]));
                for (int k = j; k < j_end; k++) {
                    if (i > 0) {
                        cudaErrCheck(hipStreamWaitEvent(stream_i[i], events_h[i - 1][k], 0));
                        cudaErrCheck(hipEventDestroy(events_h[i - 1][k]));
                    }
                }
                int layer_index = i;
                int sequence_index = j;
                float *W = this->weight_out + layer_index * this->mem_cell_num * this->input_dim * 8;
                cublasErrCheck(hipblasSgemm(this->handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
                                            4 * this->mem_cell_num, mini_batch * (j_end - j), this->input_dim,
                                            &one,
                                            W, 4 * this->mem_cell_num,
                                            this->input_data + sequence_index * this->num_elements + layer_index * this->num_elements * this->seq_length, this->input_dim,
                                            &one,
                                            temp_input + sequence_index * 4 * this->num_elements, 4 * this->mem_cell_num));
                for (int k = j; k < j_end; k++) {
                    cudaErrCheck(hipEventCreate(&events_i[layer_index][k], hipEventDisableTiming));
                    cudaErrCheck(hipEventRecord(events_i[layer_index][k], stream_i[layer_index]));
                }
                for (int k = j; k < j_end; k++) {
                    // perform R * h_prev
                    //printf("print access : %d, %d\n", layer_index, k);
                    cublasErrCheck(hipblasSetStream(handle, stream_h[layer_index]));
                    sequence_index = k;
                    float *R = this->weight_out + layer_index * this->mem_cell_num * this->input_dim * 8 + this->mem_cell_num * this->input_dim * 4;
                    cublasErrCheck(hipblasSgemm(this->handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
                                                4 * this->mem_cell_num, mini_batch, this->input_dim,
                                                &one,
                                                R, 4 * this->mem_cell_num,
                                                this->output_data + sequence_index * this->num_elements + layer_index * this->num_elements * (this->seq_length + 1), this->mem_cell_num,
                                                &one,
                                                temp_output + layer_index * 4 * this->num_elements, 4 * this->mem_cell_num));
                    // wait for the W * x result of this timestep
                    cudaErrCheck(hipStreamWaitEvent(stream_h[layer_index], events_i[layer_index][k], 0));
                    cudaErrCheck(hipEventDestroy(events_i[layer_index][k]));
                    // element wise operation
                    dim3 blockDim;
                    dim3 gridDim;
                    blockDim.x = 256;
                    gridDim.x = (this->num_elements + blockDim.x - 1) / blockDim.x;
                    sequence_index = k;
                    //const int threadsPerBlock = 256;
                    //const int blocks = (this->num_elements + cdffg - 1) / threadsPerBlock;
                    element_wise_operation<<<gridDim, blockDim, 0, stream_h[layer_index]>>>(
                        this->mem_cell_num, this->mini_batch,
                        temp_input + sequence_index * 4 * this->num_elements,
                        temp_output + layer_index * 4 * this->num_elements,
                        this->bias + layer_index * mem_cell_num * 8,
                        this->output_data + (sequence_index + 1) * this->num_elements + layer_index * this->num_elements * (this->seq_length + 1),
                        this->input_data + sequence_index * this->num_elements + (layer_index + 1) * this->num_elements * this->seq_length,
                        this->cell_state + sequence_index * this->num_elements + layer_index * this->num_elements * (this->seq_length + 1),
                        this->cell_state + (sequence_index + 1) * this->num_elements + layer_index * this->num_elements * (this->seq_length + 1),
                        this->training,
                        this->activations + layer_index * seq_length * 4 * num_elements + sequence_index * 4 * num_elements);
                    cudaErrCheck(hipGetLastError());
                    count++;
                    //printf("%d is count\n", count);
                    if (layer_index != this->num_layers - 1) {
                        cudaErrCheck(hipEventCreate(&events_h[layer_index][k], hipEventDisableTiming));
                        cudaErrCheck(hipEventRecord(events_h[layer_index][k], stream_h[layer_index]));
                    }
                }
                i++;
                j -= recur_batch_size;
            }
            if (column_start >= this->seq_length - 2) {
                row_start++;
            }
            else {
                column_start += recur_batch_size;
            }
        }
        cudaErrCheck(hipEventRecord(stop));
        cudaErrCheck(hipEventSynchronize(stop));
        cudaErrCheck(hipEventElapsedTime(&elapsedTime, start, stop));
        cudaErrCheck(hipDeviceSynchronize());
        // We're done. Print some checksums
        if (checkF) {
            float* testOutputi;
            float* testOutputh;
            float* testOutputc;
            testOutputi = (float*)malloc(this->num_elements * this->seq_length * sizeof(float));
            testOutputh = (float*)malloc(this->num_elements * this->num_layers * sizeof(float));
            testOutputc = (float*)malloc(this->num_elements * this->num_layers * sizeof(float));
            cudaErrCheck(hipMemcpy(testOutputi, this->input_data + this->num_layers * seq_length * num_elements, seq_length * num_elements * sizeof(float), hipMemcpyDeviceToHost));
            for (int layer = 0; layer < num_layers; layer++) {
                cudaErrCheck(hipMemcpy(testOutputh + layer * this->num_elements, this->output_data + seq_length * num_elements + layer * (seq_length + 1) * num_elements, num_elements * sizeof(float), hipMemcpyDeviceToHost));
                cudaErrCheck(hipMemcpy(testOutputc + layer * this->num_elements, this->cell_state + seq_length * num_elements + layer * (seq_length + 1) * num_elements, num_elements * sizeof(float), hipMemcpyDeviceToHost));
            }
            double checksumi = 0.;
            double checksumh = 0.;
            double checksumc = 0.;
            for (int m = 0; m < mini_batch; m++) {
                for (int j = 0; j < seq_length; j++) {
                    for (int i = 0; i < mem_cell_num; i++) {
                        checksumi += testOutputi[j * num_elements + m * mem_cell_num + i];
                        if (mem_cell_num <= 8) printf("i: (%d,%d): %E\n", j, i, testOutputi[j * num_elements + m * mem_cell_num + i]);
                    }
                }
                for (int j = 0; j < num_layers; j++) {
                    for (int i = 0; i < mem_cell_num; i++) {
                        checksumh += testOutputh[j * num_elements + m * mem_cell_num + i];
                        checksumc += testOutputc[j * num_elements + m * mem_cell_num + i];
                    }
                }
                if (m == 0) printf("i checksum (example %d) %E\n", m, checksumi);
                if (m == 0) printf("h checksum (example %d) %E\n", m, checksumh);
                if (m == 0) printf("c checksum (example %d) %E\n", m, checksumc);
            }
            printf("i checksum %E ", checksumi);
            printf("c checksum %E ", checksumc);
            printf("h checksum %E\n", checksumh);
            free(testOutputi);
            free(testOutputc);
            free(testOutputh);
        }
    }
    return elapsedTime;
}

__global__ void element_wise_operation_prop1(float *cell_state) {
    /*int index = blockIdx.x * blockDim.x + threadIdx.x;
    int num_element = mem_cell_num * mini_batch;*/
}

void LSTMNetwork::backprop() {
    const int threadsPerBlock = 256;
    const int blocks = (this->num_elements + threadsPerBlock - 1) / threadsPerBlock;
    while (1) {
        int layer_index;
        int sequence_index;
        //element_wise_operation_prop1<<<blocks, threadsPerBlock>>>(this->cell_state);
        //top_diff_h;
        //top_diff_s;
    }
}

int main(int argc, char* argv[]) {
    int seqLength;
    int numLayers;
    int hiddenSize;
    int miniBatch;
    if (argc == 5) {
        seqLength = atoi(argv[1]);
        numLayers = atoi(argv[2]);
        hiddenSize = atoi(argv[3]);
        miniBatch = atoi(argv[4]);
    }
    else if (argc == 1) {
        printf("Running with default settings\n");
        seqLength = 100;
        numLayers = 4;
        hiddenSize = 512;
        miniBatch = 64;
    }
    else {
        printf("Usage: ./LSTM <seqLength>
<numLayers> <hiddenSize> <miniBatch>\n"); return 1; } printf("seqLength %d, numLayers %d, hiddenSize %d, miniBatch %d\n", seqLength, numLayers, hiddenSize, miniBatch); int numRuns = 1; float totalTime = 0.f; LSTMNetwork network(4, 512, 512, 64, 100, NULL); for (int run = 0; run < numRuns; run++) { totalTime+=network.feedforward(true); //totalTime += LSTMTest(hiddenSize, miniBatch, seqLength, numLayers, true); } printf("Runtime %fms\n", totalTime / numRuns); return time < 0; }
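// ---------------------------------------------------------------------------
// A minimal host-side sketch, not part of the original benchmark: the fused
// per-cell gate math that element_wise_operation computes on the GPU, written
// out sequentially so results can be spot-checked on the CPU. The name
// lstm_cell_reference and the slice-pointer arguments are hypothetical; gate
// order (i, f, o, g) and the 8 * mem_cell_num bias layout follow the kernel.
#include <cmath>

static void lstm_cell_reference(int mem_cell_num,
                                const float* pre_in,   // one column of temp_input  (4 * mem_cell_num)
                                const float* pre_rec,  // one column of temp_output (4 * mem_cell_num)
                                const float* bias,     // input + recurrent biases  (8 * mem_cell_num)
                                const float* c_prev,   // previous cell state       (mem_cell_num)
                                float* c_out, float* h_out)
{
    for (int r = 0; r < mem_cell_num; r++) {
        float g[4];
        for (int gate = 0; gate < 4; gate++) {
            int row = gate * mem_cell_num + r;
            g[gate] = pre_in[row] + pre_rec[row]
                    + bias[row] + bias[(gate + 4) * mem_cell_num + r];
        }
        float i_g  = 1.f / (1.f + std::exp(-g[0]));  // input gate
        float f_g  = 1.f / (1.f + std::exp(-g[1]));  // forget gate
        float o_g  = 1.f / (1.f + std::exp(-g[2]));  // output gate
        float c_hat = std::tanh(g[3]);               // candidate cell value
        float c = f_g * c_prev[r] + i_g * c_hat;
        c_out[r] = c;
        h_out[r] = o_g * std::tanh(c);
    }
}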
d93a6f19c58363c0e024f127f6dd9ef22f41ebcc.cu
#include <random> #include <iostream> #include <cmath> #include <vector> #include <math.h> #include <stdio.h> #include <cublas_v2.h> #include <curand.h> #define recur_batch_size 2; #define TRAINING (false) #ifndef PERFOPTS #define PERFOPTS (31) #endif int option = 0; #define GROUP_GEMM ((PERFOPTS & 1)) #define USE_STREAMS true #define FUSE_PW ((PERFOPTS & 4)) #define PRE_TRANSPOSE ((PERFOPTS & 8)) #define RECUR_BATCH_SIZE (((PERFOPTS & 16) ? 2 : 1)) // Device functions __forceinline__ __device__ float sigmoidf(float in) { return 1.f / (1.f + expf(-in)); } __global__ void elementWise_fp(int hiddenSize, int miniBatch, float *tmp_h, float *tmp_i, float *bias, float *linearGates, float *h_out, float *i_out, float *c_in, float *c_out, bool training) { int index = blockIdx.x * blockDim.x + threadIdx.x; int numElements = miniBatch * hiddenSize; if (index >= numElements) return; int batch = index / hiddenSize; int gateIndex = (index % hiddenSize) + 4 * batch * hiddenSize; float g[4]; for (int i = 0; i < 4; i++) { g[i] = tmp_i[i * hiddenSize + gateIndex] + tmp_h[i * hiddenSize + gateIndex]; g[i] += bias[i * hiddenSize + index % hiddenSize] + bias[(i + 4) * hiddenSize + index % hiddenSize]; if (training) linearGates[gateIndex + i * hiddenSize] = g[i]; } float in_gate = sigmoidf(g[0]); float forget_gate = sigmoidf(g[1]); float in_gate2 = tanhf(g[2]); float out_gate = sigmoidf(g[3]); float val = (forget_gate * c_in[index]) + (in_gate * in_gate2); c_out[index] = val; val = out_gate * tanhf(val); h_out[index] = val; i_out[index] = val; } // define the error information #define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); } void cudaErrCheck_(cudaError_t stat, const char *file, int line) { if (stat != cudaSuccess) { fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line); } } #define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); } void cublasErrCheck_(cublasStatus_t stat, const char *file, int line) { if (stat != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line); } } #define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); } void curandErrCheck_(curandStatus_t stat, const char *file, int line) { if (stat != CURAND_STATUS_SUCCESS) { fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line); } } float one = 1.f; float zero = 0.f; class LSTMNetwork{ public: cublasHandle_t handle; // arguments about the network int num_layers; int mem_cell_num; int input_dim; int mini_batch; int seq_length; int num_elements; float *input_data; float *output_data; float *cell_state; // workspace for the result of R * h and W * x float *temp_output; float *temp_input; // used for training float *activations; // stream and event cudaStream_t *stream_i; cudaStream_t *stream_h; cudaEvent_t **events_i; cudaEvent_t **events_h; bool training; // W and R // W is for the input // R is for the h_prev // TODO: is it necessary to have another work space for weight float *weight_in; float *weight_out; float *bias; LSTMNetwork(int num_layers, int mem_cell_num, int input_dim, int mini_batch, int seq_length, float *input); ~LSTMNetwork(); float feedforward(bool training); void backprop(); void transpose_weight(); }; LSTMNetwork::LSTMNetwork(int num_layers, int mem_cell_num, int input_dim, int mini_batch, int seq_length, float *input) { /*this->mini_batch = 64; bool checkF = true; this->num_layers = 4; this->seq_length = 100; this->mem_cell_num = 512; this->input_dim = 512;*/ this->num_layers = num_layers; 
this->mem_cell_num = mem_cell_num; this->input_dim = input_dim; this->mini_batch = mini_batch; this->seq_length = seq_length; // initialize the handle cublasErrCheck(cublasCreate(&this->handle)); this->num_elements = input_dim * mini_batch; // initalize stream and event; stream_i = (cudaStream_t*)malloc(num_layers * sizeof(cudaStream_t)); stream_h = (cudaStream_t*)malloc(num_layers * sizeof(cudaStream_t)); events_i = (cudaEvent_t**)malloc(num_layers * sizeof(cudaEvent_t*)); events_h = (cudaEvent_t**)malloc(num_layers * sizeof(cudaEvent_t*)); for (int i = 0; i < num_layers; i++) { events_i[i] = (cudaEvent_t*)malloc(seq_length * sizeof(cudaEvent_t)); events_h[i] = (cudaEvent_t*)malloc(seq_length * sizeof(cudaEvent_t)); } // initialize stream for (int i = 0; i < num_layers; i++) { if (USE_STREAMS) { cudaErrCheck(cudaStreamCreate(&stream_i[i])); // Priority is empirical. cudaErrCheck(cudaStreamCreateWithPriority(&stream_h[i], 0, -1)); } else { stream_i[i] = NULL; stream_h[i] = NULL; } } cudaErrCheck(cudaMalloc((void**)&output_data, (seq_length + 1) * (num_layers) * num_elements * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&input_data, (seq_length) * (num_layers + 1) * num_elements * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&cell_state, (seq_length + 1) * (num_layers) * num_elements * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&weight_in, num_layers * mem_cell_num * input_dim * 8 * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&weight_out, num_layers * mem_cell_num * input_dim * 8 * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&temp_output, 4 * num_layers * num_elements * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&temp_input, 4 * seq_length * num_elements * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&bias, num_layers * mem_cell_num * 8 * sizeof(float))); // TODO: copy input into the input_data // TOOD: randomlize the first column of each layer for cell_data and output_data cudaErrCheck(cudaMalloc((void**)&activations, 4 * seq_length * num_layers * num_elements * sizeof(float))); // randomlize the weight curandGenerator_t rng; curandErrCheck(curandCreateGenerator(&rng, CURAND_RNG_PSEUDO_DEFAULT)); curandErrCheck(curandSetPseudoRandomGeneratorSeed(rng, 1337ull)); curandErrCheck(curandGenerateUniform(rng, this->weight_in, num_layers * mem_cell_num * input_dim * 8)); curandErrCheck(curandGenerateUniform(rng, this->output_data, (seq_length + 1) * (num_layers) * num_elements)); curandErrCheck(curandGenerateUniform(rng, this->cell_state, (seq_length + 1) * (num_layers) * num_elements)); curandErrCheck(curandGenerateUniform(rng, this->input_data, (seq_length) * (num_layers + 1) * num_elements)); curandErrCheck(curandGenerateUniform(rng, bias, num_layers * mem_cell_num * 8)); curandErrCheck(curandDestroyGenerator(rng)); // TOOD: do we need this? 
    cudaErrCheck(cudaDeviceSynchronize());
}

LSTMNetwork::~LSTMNetwork() {
    cublasErrCheck(cublasDestroy(this->handle));
    cudaErrCheck(cudaFree(output_data));
    cudaErrCheck(cudaFree(input_data));
    cudaErrCheck(cudaFree(cell_state));
    cudaErrCheck(cudaFree(this->weight_in));
    cudaErrCheck(cudaFree(this->weight_out));
    cudaErrCheck(cudaFree(bias));
    cudaErrCheck(cudaFree(activations));
    // the GEMM workspaces are allocated in the constructor and must be freed as well
    cudaErrCheck(cudaFree(temp_input));
    cudaErrCheck(cudaFree(temp_output));
}

// optimization 4: PRE-TRANSPOSING THE WEIGHT MATRIX
void LSTMNetwork::transpose_weight() {
    for (int i = 0; i < this->num_layers; i++) {
        float *W_in_pointer  = this->weight_in  + i * this->mem_cell_num * this->input_dim * 8;
        float *W_out_pointer = this->weight_out + i * this->mem_cell_num * this->input_dim * 8;
        float *R_in_pointer  = this->weight_in  + i * this->mem_cell_num * this->input_dim * 8 + this->mem_cell_num * this->input_dim * 4;
        float *R_out_pointer = this->weight_out + i * this->mem_cell_num * this->input_dim * 8 + this->mem_cell_num * this->input_dim * 4;
        // transpose 4 * W for one layer
        cublasErrCheck(cublasSetStream(handle, stream_i[i]));
        cublasErrCheck(cublasSgeam(this->handle, CUBLAS_OP_T, CUBLAS_OP_N,
                                   4 * this->mem_cell_num, this->input_dim,
                                   &one, W_in_pointer, this->mem_cell_num,
                                   &zero, NULL, 4 * this->mem_cell_num,
                                   W_out_pointer, 4 * this->mem_cell_num));
        // transpose 4 * R for one layer
        cublasErrCheck(cublasSetStream(handle, stream_h[i]));
        cublasErrCheck(cublasSgeam(this->handle, CUBLAS_OP_T, CUBLAS_OP_N,
                                   4 * this->mem_cell_num, this->input_dim,
                                   &one, R_in_pointer, this->mem_cell_num,
                                   &zero, NULL, 4 * this->mem_cell_num,
                                   R_out_pointer, 4 * this->mem_cell_num));
    }
}

// helper function for forward: fused element-wise LSTM cell update
__global__ void element_wise_operation(int mem_cell_num, int mini_batch,
                                       float *temp_input, float *temp_output, float *bias,
                                       float *output_data, float *input_data,
                                       float *cell_prev, float *cell_curr,
                                       bool training, float *activation) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int num_element = mem_cell_num * mini_batch;
    // one thread per (cell, batch) element; indices at or past the end do nothing
    if (index >= num_element) {
        return;
    }
    int column_index = index / mem_cell_num;
    int row_index = index % mem_cell_num;
    float result[4];
    for (int i = 0; i < 4; i++) {
        int real_row_index = row_index + i * mem_cell_num;
        int index_for_temp = column_index * 4 * mem_cell_num + real_row_index;
        result[i] = temp_input[index_for_temp] + temp_output[index_for_temp]
                  + bias[real_row_index] + bias[(i + 4) * mem_cell_num + row_index];
    }
    // gate sequence: i, f, o, ct; all stacked matrices must follow this order
    result[0] = sigmoidf(result[0]);
    result[1] = sigmoidf(result[1]);
    result[2] = sigmoidf(result[2]);
    result[3] = tanhf(result[3]);
    if (training) {
        for (int i = 0; i < 4; i++) {
            activation[column_index * 4 * mem_cell_num + i * mem_cell_num + row_index] = result[i];
        }
    }
    int data_offset = column_index * mem_cell_num + row_index;
    float ct_prev = cell_prev[data_offset];
    float ct = result[1] * ct_prev + result[0] * result[3];
    float ht = result[2] * tanhf(ct);
    // store ct and ht
    cell_curr[data_offset] = ct;
    output_data[data_offset] = ht;
    input_data[data_offset] = ht;
}

// perform the feedforward for the entire network
float LSTMNetwork::feedforward(bool training) {
    float elapsedTime = 0.f;
    option = 2;
    if (option == 2) {
        bool checkF = true;
        // Timing starts here
        cudaEvent_t start, stop;
        cudaErrCheck(cudaEventCreate(&start));
        cudaErrCheck(cudaEventCreate(&stop));
        cudaErrCheck(cudaEventRecord(start));

        transpose_weight();

        // walk a wavefront over the (layer, timestep) grid so independent GEMMs
        // from different layers can overlap on their per-layer streams
        int row_start = 0;
        int column_start = 0;
        int count = 0;
        while (row_start < this->num_layers) {
            int i = row_start;
            int j = column_start;
            while (j >= 0 && i < this->num_layers) {
                int j_end = j + recur_batch_size;
                if (j_end > this->seq_length) j_end = this->seq_length;

                cublasErrCheck(cublasSetStream(handle, stream_i[i]));
                for (int k = j; k < j_end; k++) {
                    if (i > 0) {
                        cudaErrCheck(cudaStreamWaitEvent(stream_i[i], events_h[i - 1][k], 0));
                        cudaErrCheck(cudaEventDestroy(events_h[i - 1][k]));
                    }
                }
                int layer_index = i;
                int sequence_index = j;
                float *W = this->weight_out + layer_index * this->mem_cell_num * this->input_dim * 8;
                // batched W * x over recur_batch_size timesteps; beta is zero because the
                // workspace is reused across layers and holds garbage after cudaMalloc
                cublasErrCheck(cublasSgemm(this->handle, CUBLAS_OP_N, CUBLAS_OP_N,
                                           4 * this->mem_cell_num, mini_batch * (j_end - j), this->input_dim,
                                           &one, W, 4 * this->mem_cell_num,
                                           this->input_data + sequence_index * this->num_elements + layer_index * this->num_elements * this->seq_length, this->input_dim,
                                           &zero, temp_input + sequence_index * 4 * this->num_elements, 4 * this->mem_cell_num));
                for (int k = j; k < j_end; k++) {
                    cudaErrCheck(cudaEventCreateWithFlags(&events_i[layer_index][k], cudaEventDisableTiming));
                    cudaErrCheck(cudaEventRecord(events_i[layer_index][k], stream_i[layer_index]));
                }
                for (int k = j; k < j_end; k++) {
                    // perform R * h_prev for one timestep
                    cublasErrCheck(cublasSetStream(handle, stream_h[layer_index]));
                    sequence_index = k;
                    float *R = this->weight_out + layer_index * this->mem_cell_num * this->input_dim * 8 + this->mem_cell_num * this->input_dim * 4;
                    cublasErrCheck(cublasSgemm(this->handle, CUBLAS_OP_N, CUBLAS_OP_N,
                                               4 * this->mem_cell_num, mini_batch, this->input_dim,
                                               &one, R, 4 * this->mem_cell_num,
                                               this->output_data + sequence_index * this->num_elements + layer_index * this->num_elements * (this->seq_length + 1), this->mem_cell_num,
                                               &zero, temp_output + layer_index * 4 * this->num_elements, 4 * this->mem_cell_num));
                    // wait for the matching W * x result before the fused cell update
                    cudaErrCheck(cudaStreamWaitEvent(stream_h[layer_index], events_i[layer_index][k], 0));
                    cudaErrCheck(cudaEventDestroy(events_i[layer_index][k]));

                    // element-wise operation
                    dim3 blockDim;
                    dim3 gridDim;
                    blockDim.x = 256;
                    gridDim.x = (this->num_elements + blockDim.x - 1) / blockDim.x;
                    element_wise_operation<<<gridDim, blockDim, 0, stream_h[layer_index]>>>(
                        this->mem_cell_num, this->mini_batch,
                        temp_input + sequence_index * 4 * this->num_elements,
                        temp_output + layer_index * 4 * this->num_elements,
                        this->bias + layer_index * mem_cell_num * 8,
                        this->output_data + (sequence_index + 1) * this->num_elements + layer_index * this->num_elements * (this->seq_length + 1),
                        this->input_data + sequence_index * this->num_elements + (layer_index + 1) * this->num_elements * this->seq_length,
                        this->cell_state + sequence_index * this->num_elements + layer_index * this->num_elements * (this->seq_length + 1),
                        this->cell_state + (sequence_index + 1) * this->num_elements + layer_index * this->num_elements * (this->seq_length + 1),
                        training,  // the function parameter; the member this->training is never initialized
                        this->activations + layer_index * seq_length * 4 * num_elements + sequence_index * 4 * num_elements);
                    cudaErrCheck(cudaGetLastError());
                    count++;
                    if (layer_index != this->num_layers - 1) {
                        cudaErrCheck(cudaEventCreateWithFlags(&events_h[layer_index][k], cudaEventDisableTiming));
                        cudaErrCheck(cudaEventRecord(events_h[layer_index][k], stream_h[layer_index]));
                    }
                }
                i++;
                j -= recur_batch_size;
            }
            if (column_start >= this->seq_length - 2) {
                row_start++;
            } else {
                column_start += recur_batch_size;
            }
        }
        cudaErrCheck(cudaEventRecord(stop));
        cudaErrCheck(cudaEventSynchronize(stop));
        cudaErrCheck(cudaEventElapsedTime(&elapsedTime, start, stop));
        cudaErrCheck(cudaDeviceSynchronize());

        // We're done. Print some checksums
        if (checkF) {
            float* testOutputi;
            float* testOutputh;
            float* testOutputc;
            testOutputi = (float*)malloc(this->num_elements * this->seq_length * sizeof(float));
            testOutputh = (float*)malloc(this->num_elements * this->num_layers * sizeof(float));
            testOutputc = (float*)malloc(this->num_elements * this->num_layers * sizeof(float));
            cudaErrCheck(cudaMemcpy(testOutputi, this->input_data + this->num_layers * seq_length * num_elements,
                                    seq_length * num_elements * sizeof(float), cudaMemcpyDeviceToHost));
            for (int layer = 0; layer < num_layers; layer++) {
                cudaErrCheck(cudaMemcpy(testOutputh + layer * this->num_elements,
                                        this->output_data + seq_length * num_elements + layer * (seq_length + 1) * num_elements,
                                        num_elements * sizeof(float), cudaMemcpyDeviceToHost));
                cudaErrCheck(cudaMemcpy(testOutputc + layer * this->num_elements,
                                        this->cell_state + seq_length * num_elements + layer * (seq_length + 1) * num_elements,
                                        num_elements * sizeof(float), cudaMemcpyDeviceToHost));
            }
            double checksumi = 0.;
            double checksumh = 0.;
            double checksumc = 0.;
            for (int m = 0; m < mini_batch; m++) {
                for (int j = 0; j < seq_length; j++) {
                    for (int i = 0; i < mem_cell_num; i++) {
                        checksumi += testOutputi[j * num_elements + m * mem_cell_num + i];
                        if (mem_cell_num <= 8) printf("i: (%d,%d): %E\n", j, i, testOutputi[j * num_elements + m * mem_cell_num + i]);
                    }
                }
                for (int j = 0; j < num_layers; j++) {
                    for (int i = 0; i < mem_cell_num; i++) {
                        checksumh += testOutputh[j * num_elements + m * mem_cell_num + i];
                        checksumc += testOutputc[j * num_elements + m * mem_cell_num + i];
                    }
                }
                if (m == 0) printf("i checksum (example %d) %E\n", m, checksumi);
                if (m == 0) printf("h checksum (example %d) %E\n", m, checksumh);
                if (m == 0) printf("c checksum (example %d) %E\n", m, checksumc);
            }
            printf("i checksum %E ", checksumi);
            printf("c checksum %E ", checksumc);
            printf("h checksum %E\n", checksumh);
            free(testOutputi);
            free(testOutputc);
            free(testOutputh);
        }
    }
    return elapsedTime;
}

__global__ void element_wise_operation_prop1(float *cell_state) {
    // element-wise kernel for the backward pass: not yet implemented
}

// backward pass: unfinished stub, never called from main
void LSTMNetwork::backprop() {
    const int threadsPerBlock = 256;
    const int blocks = (this->num_elements + threadsPerBlock - 1) / threadsPerBlock;
    //element_wise_operation_prop1<<<blocks, threadsPerBlock>>>(this->cell_state);
    //top_diff_h;
    //top_diff_s;
}

int main(int argc, char* argv[]) {
    int seqLength;
    int numLayers;
    int hiddenSize;
    int miniBatch;
    if (argc == 5) {
        seqLength = atoi(argv[1]);
        numLayers = atoi(argv[2]);
        hiddenSize = atoi(argv[3]);
        miniBatch = atoi(argv[4]);
    }
    else if (argc == 1) {
        printf("Running with default settings\n");
        seqLength = 100;
        numLayers = 4;
        hiddenSize = 512;
        miniBatch = 64;
    }
    else {
        printf("Usage: ./LSTM <seqLength> <numLayers> <hiddenSize> <miniBatch>\n");
        return 1;
    }
    printf("seqLength %d, numLayers %d, hiddenSize %d, miniBatch %d\n", seqLength, numLayers, hiddenSize, miniBatch);

    int numRuns = 1;
    float totalTime = 0.f;
    // build the network from the parsed settings (input_dim is taken equal to hiddenSize)
    LSTMNetwork network(numLayers, hiddenSize, hiddenSize, miniBatch, seqLength, NULL);
    for (int run = 0; run < numRuns; run++) {
        totalTime += network.feedforward(true);
    }
    printf("Runtime %fms\n", totalTime / numRuns);
    return 0;
}
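// ---------------------------------------------------------------------------
// A minimal sketch, not from the original file: the record/wait event pattern
// that feedforward() uses to order work between stream_i (W*x GEMMs) and
// stream_h (R*h GEMMs plus the fused cell kernel), shown in isolation. The
// kernels produce/consume are placeholders for the real GEMM and cell update.
__global__ void produce(float* buf)                   { if (threadIdx.x == 0) buf[0] = 1.f; }
__global__ void consume(const float* buf, float* out) { if (threadIdx.x == 0) out[0] = buf[0] + 1.f; }

static void event_dependency_sketch(float* buf, float* out)
{
    cudaStream_t s_prod, s_cons;
    cudaEvent_t done;
    cudaStreamCreate(&s_prod);
    cudaStreamCreate(&s_cons);
    cudaEventCreateWithFlags(&done, cudaEventDisableTiming);  // timing disabled, as in feedforward()

    produce<<<1, 32, 0, s_prod>>>(buf);       // producer runs on its own stream
    cudaEventRecord(done, s_prod);            // mark its completion point
    cudaStreamWaitEvent(s_cons, done, 0);     // gate the consumer stream on it
    consume<<<1, 32, 0, s_cons>>>(buf, out);  // ordered after produce() completes

    cudaStreamSynchronize(s_cons);
    cudaEventDestroy(done);
    cudaStreamDestroy(s_prod);
    cudaStreamDestroy(s_cons);
}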
635a26fbade48c6c82db9df001fb4ce49b9ab063.hip
// !!! This is a file automatically generated by hipify!!! #include <iomanip> #include <memory> #include <chrono> #include <vector> #include <tuple> #include <hip/hip_runtime.h> #include <cudnn.h> #include <hiprand/hiprand.h> #include <thrust/device_ptr.h> #include <thrust/fill.h> #include "tensor.h" #include "cudnn_helper.h" #define USE_GET 0 class cudnnCNN { TensorDescriptor4d<float> x_desc_; TensorDescriptor4d<float> h_desc_; FilterDescriptor4d<float> w_desc_; std::vector<int> output_dims_; int num_repeats_; size_t fwd_workspace_size_; size_t bwd_inputs_workspace_size_; size_t bwd_params_workspace_size_; Tensor<float> fwd_workspace_; Tensor<float> bwd_inputs_workspace_; Tensor<float> bwd_params_workspace_; cudnnConvolutionFwdAlgo_t fwd_algo_; cudnnConvolutionBwdDataAlgo_t bwd_inputs_algo_; cudnnConvolutionBwdFilterAlgo_t bwd_params_algo_; const float alpha_ = 1.f; const float beta_ = 0.f; ConvolutionDescriptor conv_desc_; CudnnHandle cudnn_handle_; public: cudnnCNN(int w, int h, int c, int n, int k, int r, int s, int pad_w, int pad_h, int wstride, int hstride) : cudnn_handle_(), x_desc_(CUDNN_TENSOR_NCHW, n, c, h, w), w_desc_(CUDNN_TENSOR_NCHW, k, c, r, s), conv_desc_(pad_h, pad_w, hstride, wstride) { int out_h, out_w, out_c, out_n; // Get output dimensions CHECK_CUDNN_ERROR(cudnnGetConvolution2dForwardOutputDim(conv_desc_.desc(), x_desc_.desc(), w_desc_.desc(), &out_n, &out_c, &out_h, &out_w)); h_desc_ = TensorDescriptor4d<float>(CUDNN_TENSOR_NCHW, out_n, out_c, out_h, out_w); output_dims_ = {out_w, out_h, out_c, out_n}; #if USE_GET // Pick forward convolution algorithm CHECK_CUDNN_ERROR(cudnnGetConvolutionForwardAlgorithm(cudnn_handle_.handle(), x_desc_.desc(), w_desc_.desc(), conv_desc_.desc(), h_desc_.desc(), CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &fwd_algo_)); #else // Pick forward convolution algorithm cudnnConvolutionFwdAlgoPerf_t fwd_perf; int ret_count; CHECK_CUDNN_ERROR(cudnnFindConvolutionForwardAlgorithm(cudnn_handle_.handle(), x_desc_.desc(), w_desc_.desc(), conv_desc_.desc(), h_desc_.desc(), 1, &ret_count, &fwd_perf)); fwd_algo_ = fwd_perf.algo; #endif // Set fwd workspace size CHECK_CUDNN_ERROR(cudnnGetConvolutionForwardWorkspaceSize(cudnn_handle_.handle(), x_desc_.desc(), w_desc_.desc(), conv_desc_.desc(), h_desc_.desc(), fwd_algo_, &fwd_workspace_size_)); fwd_workspace_ = zeros(std::vector<int>{static_cast<int>(fwd_workspace_size_ / sizeof(float)), 1}); #if USE_GET // Pick backward convolution algorithm CHECK_CUDNN_ERROR(cudnnGetConvolutionBackwardFilterAlgorithm(cudnn_handle_.handle(), x_desc_.desc(), h_desc_.desc(), conv_desc_.desc(), w_desc_.desc(), CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &bwd_params_algo_)); #else cudnnConvolutionBwdFilterAlgoPerf_t filter_perf; CHECK_CUDNN_ERROR(cudnnFindConvolutionBackwardFilterAlgorithm(cudnn_handle_.handle(), x_desc_.desc(), h_desc_.desc(), conv_desc_.desc(), w_desc_.desc(), 1, &ret_count, &filter_perf)); bwd_params_algo_ = filter_perf.algo; #endif // Backward params workspace CHECK_CUDNN_ERROR(cudnnGetConvolutionBackwardFilterWorkspaceSize(cudnn_handle_.handle(), x_desc_.desc(), h_desc_.desc(), conv_desc_.desc(), w_desc_.desc(), bwd_params_algo_, &bwd_params_workspace_size_)); bwd_params_workspace_ = zeros(std::vector<int>{static_cast<int>(bwd_params_workspace_size_ / sizeof(float)), 1}); #if USE_GET // Pick backward wrt inputs convolution algorithm CHECK_CUDNN_ERROR(cudnnGetConvolutionBackwardDataAlgorithm(cudnn_handle_.handle(), w_desc_.desc(), h_desc_.desc(), conv_desc_.desc(), x_desc_.desc(), 
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &bwd_inputs_algo_)); #else cudnnConvolutionBwdDataAlgoPerf_t data_perf; CHECK_CUDNN_ERROR(cudnnFindConvolutionBackwardDataAlgorithm(cudnn_handle_.handle(), w_desc_.desc(), h_desc_.desc(), conv_desc_.desc(), x_desc_.desc(), 1, &ret_count, &data_perf)); bwd_inputs_algo_ = data_perf.algo; #endif CHECK_CUDNN_ERROR(cudnnGetConvolutionBackwardDataWorkspaceSize(cudnn_handle_.handle(), w_desc_.desc(), h_desc_.desc(), conv_desc_.desc(), x_desc_.desc(), bwd_inputs_algo_, &bwd_inputs_workspace_size_)); bwd_inputs_workspace_ = zeros(std::vector<int>{static_cast<int>(bwd_inputs_workspace_size_ / sizeof(float)), 1}); } std::vector<int> get_output_dims() { return output_dims_; } std::string get_fwd_algo_string() { if (fwd_algo_ == CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM) return "IMPLICIT_GEMM"; else if (fwd_algo_ == CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM) return "IMPLICIT_PRECOMP_GEMM"; else if (fwd_algo_ == CUDNN_CONVOLUTION_FWD_ALGO_GEMM) return "GEMM"; else if (fwd_algo_ == CUDNN_CONVOLUTION_FWD_ALGO_DIRECT) return "DIRECT"; else if (fwd_algo_ == CUDNN_CONVOLUTION_FWD_ALGO_FFT) return "FFT"; else if (fwd_algo_ == CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING) return "FFT_TILING"; else if (fwd_algo_ == CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD) return "WINOGRAD"; #if CUDNN_MAJOR >= 6 else if (fwd_algo_ == CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED) return "WINOGRAD_NONFUSED"; #endif else { std::stringstream ss; ss << "Illegal algorithm passed to get_fwd_algo_string. Algo: " << fwd_algo_ << std::endl; throw std::runtime_error(ss.str()); } } void forward(Tensor<float> x, Tensor<float> filter, Tensor<float> h) { // Convolution forward. CHECK_CUDNN_ERROR(cudnnConvolutionForward(cudnn_handle_.handle(), &alpha_, x_desc_.desc(), x.begin(), w_desc_.desc(), filter.begin(), conv_desc_.desc(), fwd_algo_, fwd_workspace_.begin(), fwd_workspace_size_, &beta_, h_desc_.desc(), h.begin())); } void backward_params(Tensor<float> x, Tensor<float> delta, Tensor<float> dW) { CHECK_CUDNN_ERROR(cudnnConvolutionBackwardFilter(cudnn_handle_.handle(), &alpha_, x_desc_.desc(), x.begin(), h_desc_.desc(), delta.begin(), conv_desc_.desc(), bwd_params_algo_, bwd_params_workspace_.begin(), bwd_params_workspace_size_, &beta_, w_desc_.desc(), dW.begin())); } void backward_inputs(Tensor<float> filter, Tensor<float> delta, Tensor<float> dX) { CHECK_CUDNN_ERROR(cudnnConvolutionBackwardData(cudnn_handle_.handle(), &alpha_, w_desc_.desc(), filter.begin(), h_desc_.desc(), delta.begin(), conv_desc_.desc(), bwd_inputs_algo_, bwd_inputs_workspace_.begin(), bwd_inputs_workspace_size_, &beta_, x_desc_.desc(), dX.begin())); } }; std::tuple<int, int, int, std::string> time_cnn( int k, int c, int r, int s, int n, int h, int w, int pad_h, int pad_w, int hstride, int wstride, int num_repeats, hiprandGenerator_t curand_gen ) { cudnnCNN cnn(w, h, c, n, k, r, s, pad_w, pad_h, wstride, hstride); // Allocate memory for filter auto filter = rand(std::vector<int>{r, s, c, k}, curand_gen); // Allocate memory for input auto input = rand(std::vector<int>{w, h, c, n}, curand_gen); // Allocate memory for output tensor auto output = zeros(cnn.get_output_dims()); std::string fwd_algo_s = cnn.get_fwd_algo_string(); //Warm up cnn.forward(input, filter, output); hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < num_repeats; ++i) { cnn.forward(input, filter, output); } hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); int fwd_time = 
static_cast<int>(std::chrono::duration<double, std::micro>(end - start).count() / num_repeats); // Allocate memory for backward pass wrt weights auto delta = rand(cnn.get_output_dims(), curand_gen); auto dW = zeros(std::vector<int>{r, s, c, k}); // Warm up backward cnn.backward_params(input, delta, dW); hipDeviceSynchronize(); start = std::chrono::steady_clock::now(); for (int i = 0; i < num_repeats; ++i) { // Backward pass wrt weights cnn.backward_params(input, delta, dW); } hipDeviceSynchronize(); end = std::chrono::steady_clock::now(); int bwd_params_time = static_cast<int>(std::chrono::duration<double, std::micro>(end - start).count() / num_repeats); //Allocate memory for backward pass wrt inputs auto dX = zeros(std::vector<int>{w, h, c, n}); //Warm up backward inputs cnn.backward_inputs(filter, delta, dX); hipDeviceSynchronize(); start = std::chrono::steady_clock::now(); for (int i = 0; i < num_repeats; ++i) { // Backward pass wrt weights cnn.backward_inputs(filter, delta, dX); } hipDeviceSynchronize(); end = std::chrono::steady_clock::now(); int bwd_inputs_time = static_cast<int>(std::chrono::duration<double, std::micro>(end - start).count() / num_repeats); return std::tuple<int, int, int, std::string>(fwd_time, bwd_inputs_time, bwd_params_time, fwd_algo_s); } int main(int argc, char **argv) { int num_repeats = 1000; // Handles to various cuda libraries, structures hiprandGenerator_t curand_gen; hipFree(0); if (argc > 1) num_repeats = atoi(argv[1]); // Initialize curand_gen and set appropriate seed. hiprandCreateGenerator(&curand_gen, HIPRAND_RNG_PSEUDO_DEFAULT); hiprandSetPseudoRandomGeneratorSeed(curand_gen, 123ULL); // Vector saves w, h, c, n, k, r, s, pad_w, pad_h, wstride, hstride std::vector<std::tuple<int, int, int, int, int, int, int, int, int, int, int>> problems = { std::make_tuple(700, 161, 1, 4, 32, 5, 20, 0, 0, 2, 2), std::make_tuple(700, 161, 1, 8, 32, 5, 20, 0, 0, 2, 2), std::make_tuple(700, 161, 1, 16, 32, 5, 20, 0, 0, 2, 2), std::make_tuple(700, 161, 1, 32, 32, 5, 20, 0, 0, 2, 2), std::make_tuple(341, 79, 32, 4, 32, 5, 10, 0, 0, 2, 2), std::make_tuple(341, 79, 32, 8, 32, 5, 10, 0, 0, 2, 2), std::make_tuple(341, 79, 32, 16, 32, 5, 10, 0, 0, 2, 2), std::make_tuple(341, 79, 32, 32, 32, 5, 10, 0, 0, 2, 2), std::make_tuple(480, 48, 1, 16, 16, 3, 3, 1, 1, 1, 1), std::make_tuple(240, 24, 16, 16, 32, 3, 3, 1, 1, 1, 1), std::make_tuple(120, 12, 32, 16, 64, 3, 3, 1, 1, 1, 1), std::make_tuple(60, 6, 64, 16, 128, 3, 3, 1, 1, 1, 1), std::make_tuple(108, 108, 3, 8, 64, 3, 3, 1, 1, 2, 2), std::make_tuple(54, 54, 64, 8, 64, 3, 3, 1, 1, 1, 1), std::make_tuple(27, 27, 128, 8, 128, 3, 3, 1, 1, 1, 1), std::make_tuple(14, 14, 128, 8, 256, 3, 3, 1, 1, 1, 1), std::make_tuple(7, 7, 256, 8, 512, 3, 3, 1, 1, 1, 1), std::make_tuple(224, 224, 3, 8, 64, 3, 3, 1, 1, 1, 1), std::make_tuple(112, 112, 64, 8, 128, 3, 3, 1, 1, 1, 1), std::make_tuple(56, 56, 128, 8, 256, 3, 3, 1, 1, 1, 1), std::make_tuple(28, 28, 256, 8, 512, 3, 3, 1, 1, 1, 1), std::make_tuple(14, 14, 512, 8, 512, 3, 3, 1, 1, 1, 1), std::make_tuple(7, 7, 512, 8, 512, 3, 3, 1, 1, 1, 1), std::make_tuple(224, 224, 3, 16, 64, 3, 3, 1, 1, 1, 1), std::make_tuple(112, 112, 64, 16, 128, 3, 3, 1, 1, 1, 1), std::make_tuple(56, 56, 128, 16, 256, 3, 3, 1, 1, 1, 1), std::make_tuple(28, 28, 256, 16, 512, 3, 3, 1, 1, 1, 1), std::make_tuple(14, 14, 512, 16, 512, 3, 3, 1, 1, 1, 1), std::make_tuple(7, 7, 512, 16, 512, 3, 3, 1, 1, 1, 1), std::make_tuple(224, 224, 3, 16, 64, 7, 7, 3, 3, 2, 2), std::make_tuple(28, 28, 192, 16, 32, 5, 5, 2, 2, 1, 1), 
std::make_tuple(28, 28, 192, 16, 64, 1, 1, 0, 0, 1, 1), std::make_tuple(14, 14, 512, 16, 48, 5, 5, 2, 2, 1, 1), std::make_tuple(14, 14, 512, 16, 192, 1, 1, 0, 0, 1, 1), std::make_tuple(7, 7, 832, 16, 256, 1, 1, 0, 0, 1, 1), std::make_tuple(7, 7, 832, 16, 128, 5, 5, 2, 2, 1, 1) }; std::cout << std::setw(30) << "Times" << std::endl; std::cout << std::setfill('-') << std::setw(190) << "-" << std::endl; std::cout << std::setfill(' '); std::cout << " w h c n k r s pad_w pad_h stride_w stride_h fwd_time (usec) bwd_inputs_time (usec) bwd_params_time (usec) total_time (usec) fwd_algo " << std::endl; std::cout << std::setfill('-') << std::setw(190) << "-" << std::endl; std::cout << std::setfill(' '); for (const auto &problem : problems) { // Filter parameters int k, c, r, s; // Input parameters int n, w, h; // Padding int pad_w, pad_h; // Stride int wstride, hstride; std::tie(w, h, c, n, k, r, s, pad_w, pad_h, wstride, hstride) = problem; int fwd_time, bwd_inputs_time, bwd_params_time; std::string fwd_algo_s; std::tie(fwd_time, bwd_inputs_time, bwd_params_time, fwd_algo_s) = time_cnn(k, c, r, s, n, h, w, pad_h, pad_w, hstride, wstride, num_repeats, curand_gen); std::cout << std::setw(5) << w; std::cout << std::setw(7) << h; std::cout << std::setw(7) << c; std::cout << std::setw(7) << n; std::cout << std::setw(7) << k; std::cout << std::setw(7) << r; std::cout << std::setw(7) << s; std::cout << std::setw(7) << pad_w; std::cout << std::setw(8) << pad_h; std::cout << std::setw(10) << wstride; std::cout << std::setw(10) << hstride; std::cout << std::setw(14) << std::setprecision(7) << fwd_time; std::cout << std::setw(24) << std::setprecision(7) << bwd_inputs_time; std::cout << std::setw(24) << std::setprecision(7) << bwd_params_time; std::cout << std::setw(19) << std::setprecision(8) << fwd_time + bwd_inputs_time + bwd_params_time; std::cout << std::setw(25) << fwd_algo_s; std::cout << std::endl; } // Destroy all the handles hiprandDestroyGenerator(curand_gen); return 0; }
635a26fbade48c6c82db9df001fb4ce49b9ab063.cu
#include <iomanip> #include <memory> #include <chrono> #include <vector> #include <tuple> #include <cuda.h> #include <cudnn.h> #include <curand.h> #include <thrust/device_ptr.h> #include <thrust/fill.h> #include "tensor.h" #include "cudnn_helper.h" #define USE_GET 0 class cudnnCNN { TensorDescriptor4d<float> x_desc_; TensorDescriptor4d<float> h_desc_; FilterDescriptor4d<float> w_desc_; std::vector<int> output_dims_; int num_repeats_; size_t fwd_workspace_size_; size_t bwd_inputs_workspace_size_; size_t bwd_params_workspace_size_; Tensor<float> fwd_workspace_; Tensor<float> bwd_inputs_workspace_; Tensor<float> bwd_params_workspace_; cudnnConvolutionFwdAlgo_t fwd_algo_; cudnnConvolutionBwdDataAlgo_t bwd_inputs_algo_; cudnnConvolutionBwdFilterAlgo_t bwd_params_algo_; const float alpha_ = 1.f; const float beta_ = 0.f; ConvolutionDescriptor conv_desc_; CudnnHandle cudnn_handle_; public: cudnnCNN(int w, int h, int c, int n, int k, int r, int s, int pad_w, int pad_h, int wstride, int hstride) : cudnn_handle_(), x_desc_(CUDNN_TENSOR_NCHW, n, c, h, w), w_desc_(CUDNN_TENSOR_NCHW, k, c, r, s), conv_desc_(pad_h, pad_w, hstride, wstride) { int out_h, out_w, out_c, out_n; // Get output dimensions CHECK_CUDNN_ERROR(cudnnGetConvolution2dForwardOutputDim(conv_desc_.desc(), x_desc_.desc(), w_desc_.desc(), &out_n, &out_c, &out_h, &out_w)); h_desc_ = TensorDescriptor4d<float>(CUDNN_TENSOR_NCHW, out_n, out_c, out_h, out_w); output_dims_ = {out_w, out_h, out_c, out_n}; #if USE_GET // Pick forward convolution algorithm CHECK_CUDNN_ERROR(cudnnGetConvolutionForwardAlgorithm(cudnn_handle_.handle(), x_desc_.desc(), w_desc_.desc(), conv_desc_.desc(), h_desc_.desc(), CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &fwd_algo_)); #else // Pick forward convolution algorithm cudnnConvolutionFwdAlgoPerf_t fwd_perf; int ret_count; CHECK_CUDNN_ERROR(cudnnFindConvolutionForwardAlgorithm(cudnn_handle_.handle(), x_desc_.desc(), w_desc_.desc(), conv_desc_.desc(), h_desc_.desc(), 1, &ret_count, &fwd_perf)); fwd_algo_ = fwd_perf.algo; #endif // Set fwd workspace size CHECK_CUDNN_ERROR(cudnnGetConvolutionForwardWorkspaceSize(cudnn_handle_.handle(), x_desc_.desc(), w_desc_.desc(), conv_desc_.desc(), h_desc_.desc(), fwd_algo_, &fwd_workspace_size_)); fwd_workspace_ = zeros(std::vector<int>{static_cast<int>(fwd_workspace_size_ / sizeof(float)), 1}); #if USE_GET // Pick backward convolution algorithm CHECK_CUDNN_ERROR(cudnnGetConvolutionBackwardFilterAlgorithm(cudnn_handle_.handle(), x_desc_.desc(), h_desc_.desc(), conv_desc_.desc(), w_desc_.desc(), CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &bwd_params_algo_)); #else cudnnConvolutionBwdFilterAlgoPerf_t filter_perf; CHECK_CUDNN_ERROR(cudnnFindConvolutionBackwardFilterAlgorithm(cudnn_handle_.handle(), x_desc_.desc(), h_desc_.desc(), conv_desc_.desc(), w_desc_.desc(), 1, &ret_count, &filter_perf)); bwd_params_algo_ = filter_perf.algo; #endif // Backward params workspace CHECK_CUDNN_ERROR(cudnnGetConvolutionBackwardFilterWorkspaceSize(cudnn_handle_.handle(), x_desc_.desc(), h_desc_.desc(), conv_desc_.desc(), w_desc_.desc(), bwd_params_algo_, &bwd_params_workspace_size_)); bwd_params_workspace_ = zeros(std::vector<int>{static_cast<int>(bwd_params_workspace_size_ / sizeof(float)), 1}); #if USE_GET // Pick backward wrt inputs convolution algorithm CHECK_CUDNN_ERROR(cudnnGetConvolutionBackwardDataAlgorithm(cudnn_handle_.handle(), w_desc_.desc(), h_desc_.desc(), conv_desc_.desc(), x_desc_.desc(), CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &bwd_inputs_algo_)); #else 
cudnnConvolutionBwdDataAlgoPerf_t data_perf; CHECK_CUDNN_ERROR(cudnnFindConvolutionBackwardDataAlgorithm(cudnn_handle_.handle(), w_desc_.desc(), h_desc_.desc(), conv_desc_.desc(), x_desc_.desc(), 1, &ret_count, &data_perf)); bwd_inputs_algo_ = data_perf.algo; #endif CHECK_CUDNN_ERROR(cudnnGetConvolutionBackwardDataWorkspaceSize(cudnn_handle_.handle(), w_desc_.desc(), h_desc_.desc(), conv_desc_.desc(), x_desc_.desc(), bwd_inputs_algo_, &bwd_inputs_workspace_size_)); bwd_inputs_workspace_ = zeros(std::vector<int>{static_cast<int>(bwd_inputs_workspace_size_ / sizeof(float)), 1}); } std::vector<int> get_output_dims() { return output_dims_; } std::string get_fwd_algo_string() { if (fwd_algo_ == CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM) return "IMPLICIT_GEMM"; else if (fwd_algo_ == CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM) return "IMPLICIT_PRECOMP_GEMM"; else if (fwd_algo_ == CUDNN_CONVOLUTION_FWD_ALGO_GEMM) return "GEMM"; else if (fwd_algo_ == CUDNN_CONVOLUTION_FWD_ALGO_DIRECT) return "DIRECT"; else if (fwd_algo_ == CUDNN_CONVOLUTION_FWD_ALGO_FFT) return "FFT"; else if (fwd_algo_ == CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING) return "FFT_TILING"; else if (fwd_algo_ == CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD) return "WINOGRAD"; #if CUDNN_MAJOR >= 6 else if (fwd_algo_ == CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED) return "WINOGRAD_NONFUSED"; #endif else { std::stringstream ss; ss << "Illegal algorithm passed to get_fwd_algo_string. Algo: " << fwd_algo_ << std::endl; throw std::runtime_error(ss.str()); } } void forward(Tensor<float> x, Tensor<float> filter, Tensor<float> h) { // Convolution forward. CHECK_CUDNN_ERROR(cudnnConvolutionForward(cudnn_handle_.handle(), &alpha_, x_desc_.desc(), x.begin(), w_desc_.desc(), filter.begin(), conv_desc_.desc(), fwd_algo_, fwd_workspace_.begin(), fwd_workspace_size_, &beta_, h_desc_.desc(), h.begin())); } void backward_params(Tensor<float> x, Tensor<float> delta, Tensor<float> dW) { CHECK_CUDNN_ERROR(cudnnConvolutionBackwardFilter(cudnn_handle_.handle(), &alpha_, x_desc_.desc(), x.begin(), h_desc_.desc(), delta.begin(), conv_desc_.desc(), bwd_params_algo_, bwd_params_workspace_.begin(), bwd_params_workspace_size_, &beta_, w_desc_.desc(), dW.begin())); } void backward_inputs(Tensor<float> filter, Tensor<float> delta, Tensor<float> dX) { CHECK_CUDNN_ERROR(cudnnConvolutionBackwardData(cudnn_handle_.handle(), &alpha_, w_desc_.desc(), filter.begin(), h_desc_.desc(), delta.begin(), conv_desc_.desc(), bwd_inputs_algo_, bwd_inputs_workspace_.begin(), bwd_inputs_workspace_size_, &beta_, x_desc_.desc(), dX.begin())); } }; std::tuple<int, int, int, std::string> time_cnn( int k, int c, int r, int s, int n, int h, int w, int pad_h, int pad_w, int hstride, int wstride, int num_repeats, curandGenerator_t curand_gen ) { cudnnCNN cnn(w, h, c, n, k, r, s, pad_w, pad_h, wstride, hstride); // Allocate memory for filter auto filter = rand(std::vector<int>{r, s, c, k}, curand_gen); // Allocate memory for input auto input = rand(std::vector<int>{w, h, c, n}, curand_gen); // Allocate memory for output tensor auto output = zeros(cnn.get_output_dims()); std::string fwd_algo_s = cnn.get_fwd_algo_string(); //Warm up cnn.forward(input, filter, output); cudaDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < num_repeats; ++i) { cnn.forward(input, filter, output); } cudaDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); int fwd_time = static_cast<int>(std::chrono::duration<double, std::micro>(end - start).count() / num_repeats); // 
Allocate memory for backward pass wrt weights auto delta = rand(cnn.get_output_dims(), curand_gen); auto dW = zeros(std::vector<int>{r, s, c, k}); // Warm up backward cnn.backward_params(input, delta, dW); cudaDeviceSynchronize(); start = std::chrono::steady_clock::now(); for (int i = 0; i < num_repeats; ++i) { // Backward pass wrt weights cnn.backward_params(input, delta, dW); } cudaDeviceSynchronize(); end = std::chrono::steady_clock::now(); int bwd_params_time = static_cast<int>(std::chrono::duration<double, std::micro>(end - start).count() / num_repeats); //Allocate memory for backward pass wrt inputs auto dX = zeros(std::vector<int>{w, h, c, n}); //Warm up backward inputs cnn.backward_inputs(filter, delta, dX); cudaDeviceSynchronize(); start = std::chrono::steady_clock::now(); for (int i = 0; i < num_repeats; ++i) { // Backward pass wrt weights cnn.backward_inputs(filter, delta, dX); } cudaDeviceSynchronize(); end = std::chrono::steady_clock::now(); int bwd_inputs_time = static_cast<int>(std::chrono::duration<double, std::micro>(end - start).count() / num_repeats); return std::tuple<int, int, int, std::string>(fwd_time, bwd_inputs_time, bwd_params_time, fwd_algo_s); } int main(int argc, char **argv) { int num_repeats = 1000; // Handles to various cuda libraries, structures curandGenerator_t curand_gen; cudaFree(0); if (argc > 1) num_repeats = atoi(argv[1]); // Initialize curand_gen and set appropriate seed. curandCreateGenerator(&curand_gen, CURAND_RNG_PSEUDO_DEFAULT); curandSetPseudoRandomGeneratorSeed(curand_gen, 123ULL); // Vector saves w, h, c, n, k, r, s, pad_w, pad_h, wstride, hstride std::vector<std::tuple<int, int, int, int, int, int, int, int, int, int, int>> problems = { std::make_tuple(700, 161, 1, 4, 32, 5, 20, 0, 0, 2, 2), std::make_tuple(700, 161, 1, 8, 32, 5, 20, 0, 0, 2, 2), std::make_tuple(700, 161, 1, 16, 32, 5, 20, 0, 0, 2, 2), std::make_tuple(700, 161, 1, 32, 32, 5, 20, 0, 0, 2, 2), std::make_tuple(341, 79, 32, 4, 32, 5, 10, 0, 0, 2, 2), std::make_tuple(341, 79, 32, 8, 32, 5, 10, 0, 0, 2, 2), std::make_tuple(341, 79, 32, 16, 32, 5, 10, 0, 0, 2, 2), std::make_tuple(341, 79, 32, 32, 32, 5, 10, 0, 0, 2, 2), std::make_tuple(480, 48, 1, 16, 16, 3, 3, 1, 1, 1, 1), std::make_tuple(240, 24, 16, 16, 32, 3, 3, 1, 1, 1, 1), std::make_tuple(120, 12, 32, 16, 64, 3, 3, 1, 1, 1, 1), std::make_tuple(60, 6, 64, 16, 128, 3, 3, 1, 1, 1, 1), std::make_tuple(108, 108, 3, 8, 64, 3, 3, 1, 1, 2, 2), std::make_tuple(54, 54, 64, 8, 64, 3, 3, 1, 1, 1, 1), std::make_tuple(27, 27, 128, 8, 128, 3, 3, 1, 1, 1, 1), std::make_tuple(14, 14, 128, 8, 256, 3, 3, 1, 1, 1, 1), std::make_tuple(7, 7, 256, 8, 512, 3, 3, 1, 1, 1, 1), std::make_tuple(224, 224, 3, 8, 64, 3, 3, 1, 1, 1, 1), std::make_tuple(112, 112, 64, 8, 128, 3, 3, 1, 1, 1, 1), std::make_tuple(56, 56, 128, 8, 256, 3, 3, 1, 1, 1, 1), std::make_tuple(28, 28, 256, 8, 512, 3, 3, 1, 1, 1, 1), std::make_tuple(14, 14, 512, 8, 512, 3, 3, 1, 1, 1, 1), std::make_tuple(7, 7, 512, 8, 512, 3, 3, 1, 1, 1, 1), std::make_tuple(224, 224, 3, 16, 64, 3, 3, 1, 1, 1, 1), std::make_tuple(112, 112, 64, 16, 128, 3, 3, 1, 1, 1, 1), std::make_tuple(56, 56, 128, 16, 256, 3, 3, 1, 1, 1, 1), std::make_tuple(28, 28, 256, 16, 512, 3, 3, 1, 1, 1, 1), std::make_tuple(14, 14, 512, 16, 512, 3, 3, 1, 1, 1, 1), std::make_tuple(7, 7, 512, 16, 512, 3, 3, 1, 1, 1, 1), std::make_tuple(224, 224, 3, 16, 64, 7, 7, 3, 3, 2, 2), std::make_tuple(28, 28, 192, 16, 32, 5, 5, 2, 2, 1, 1), std::make_tuple(28, 28, 192, 16, 64, 1, 1, 0, 0, 1, 1), std::make_tuple(14, 14, 512, 16, 48, 5, 5, 2, 2, 
1, 1), std::make_tuple(14, 14, 512, 16, 192, 1, 1, 0, 0, 1, 1), std::make_tuple(7, 7, 832, 16, 256, 1, 1, 0, 0, 1, 1), std::make_tuple(7, 7, 832, 16, 128, 5, 5, 2, 2, 1, 1) }; std::cout << std::setw(30) << "Times" << std::endl; std::cout << std::setfill('-') << std::setw(190) << "-" << std::endl; std::cout << std::setfill(' '); std::cout << " w h c n k r s pad_w pad_h stride_w stride_h fwd_time (usec) bwd_inputs_time (usec) bwd_params_time (usec) total_time (usec) fwd_algo " << std::endl; std::cout << std::setfill('-') << std::setw(190) << "-" << std::endl; std::cout << std::setfill(' '); for (const auto &problem : problems) { // Filter parameters int k, c, r, s; // Input parameters int n, w, h; // Padding int pad_w, pad_h; // Stride int wstride, hstride; std::tie(w, h, c, n, k, r, s, pad_w, pad_h, wstride, hstride) = problem; int fwd_time, bwd_inputs_time, bwd_params_time; std::string fwd_algo_s; std::tie(fwd_time, bwd_inputs_time, bwd_params_time, fwd_algo_s) = time_cnn(k, c, r, s, n, h, w, pad_h, pad_w, hstride, wstride, num_repeats, curand_gen); std::cout << std::setw(5) << w; std::cout << std::setw(7) << h; std::cout << std::setw(7) << c; std::cout << std::setw(7) << n; std::cout << std::setw(7) << k; std::cout << std::setw(7) << r; std::cout << std::setw(7) << s; std::cout << std::setw(7) << pad_w; std::cout << std::setw(8) << pad_h; std::cout << std::setw(10) << wstride; std::cout << std::setw(10) << hstride; std::cout << std::setw(14) << std::setprecision(7) << fwd_time; std::cout << std::setw(24) << std::setprecision(7) << bwd_inputs_time; std::cout << std::setw(24) << std::setprecision(7) << bwd_params_time; std::cout << std::setw(19) << std::setprecision(8) << fwd_time + bwd_inputs_time + bwd_params_time; std::cout << std::setw(25) << fwd_algo_s; std::cout << std::endl; } // Destroy all the handles curandDestroyGenerator(curand_gen); return 0; }
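// ---------------------------------------------------------------------------
// A small helper sketch, not part of the original benchmark: turning the
// per-iteration forward time printed by time_cnn into an approximate
// throughput number. It assumes direct-convolution arithmetic
// (2 * K * C * R * S multiply-adds per output element), so it is not a fair
// count for FFT or Winograd algorithms; out_h and out_w would come from
// cudnnCNN::get_output_dims().
static double fwd_tflops(int k, int c, int r, int s, int n,
                         int out_h, int out_w, int fwd_time_usec)
{
    double flops = 2.0 * k * c * r * s * n * (double)out_h * (double)out_w;
    return flops / (fwd_time_usec * 1e-6) / 1e12;  // TFLOP/s
}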
e505e11fa2efb0afd8689fc1e01aafb18895d4a4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <unittest/unittest.h> #include <thrust/partition.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> template<typename Iterator, typename Predicate, typename Iterator2> __global__ void is_partitioned_kernel(Iterator first, Iterator last, Predicate pred, Iterator2 result) { *result = thrust::is_partitioned(thrust::seq, first, last, pred); } template<typename T> struct is_even { __host__ __device__ bool operator()(T x) const { return ((int) x % 2) == 0; } }; template<typename T> void TestIsPartitionedDeviceSeq(size_t n) { n = thrust::max<size_t>(n, 2); thrust::device_vector<T> v = unittest::random_integers<T>(n); thrust::device_vector<bool> result(1); v[0] = 1; v[1] = 0; hipLaunchKernelGGL(( is_partitioned_kernel), dim3(1),dim3(1), 0, 0, v.begin(), v.end(), is_even<T>(), result.begin()); ASSERT_EQUAL(false, result[0]); thrust::partition(v.begin(), v.end(), is_even<T>()); hipLaunchKernelGGL(( is_partitioned_kernel), dim3(1),dim3(1), 0, 0, v.begin(), v.end(), is_even<T>(), result.begin()); ASSERT_EQUAL(true, result[0]); } DECLARE_VARIABLE_UNITTEST(TestIsPartitionedDeviceSeq);
e505e11fa2efb0afd8689fc1e01aafb18895d4a4.cu
#include <unittest/unittest.h> #include <thrust/partition.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> template<typename Iterator, typename Predicate, typename Iterator2> __global__ void is_partitioned_kernel(Iterator first, Iterator last, Predicate pred, Iterator2 result) { *result = thrust::is_partitioned(thrust::seq, first, last, pred); } template<typename T> struct is_even { __host__ __device__ bool operator()(T x) const { return ((int) x % 2) == 0; } }; template<typename T> void TestIsPartitionedDeviceSeq(size_t n) { n = thrust::max<size_t>(n, 2); thrust::device_vector<T> v = unittest::random_integers<T>(n); thrust::device_vector<bool> result(1); v[0] = 1; v[1] = 0; is_partitioned_kernel<<<1,1>>>(v.begin(), v.end(), is_even<T>(), result.begin()); ASSERT_EQUAL(false, result[0]); thrust::partition(v.begin(), v.end(), is_even<T>()); is_partitioned_kernel<<<1,1>>>(v.begin(), v.end(), is_even<T>(), result.begin()); ASSERT_EQUAL(true, result[0]); } DECLARE_VARIABLE_UNITTEST(TestIsPartitionedDeviceSeq);
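// ---------------------------------------------------------------------------
// For reference, not part of the unit test: thrust::is_partitioned can also be
// called from the host on a device_vector. The thrust::seq policy is only
// needed above because the call happens inside a kernel; from host code the
// default parallel dispatch is used.
#include <thrust/device_vector.h>

static bool host_side_partition_check()
{
    thrust::device_vector<int> v(4);
    v[0] = 2; v[1] = 4; v[2] = 1; v[3] = 3;  // evens first, then odds
    return thrust::is_partitioned(v.begin(), v.end(), is_even<int>());  // true
}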
8258fdf5e2f4e1dbf8d471f01c2d46102fc7cd48.hip
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <algorithm>
#include <cstdlib>
#include <ctime>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <cassert>

// define the chunk sizes that each threadblock will work on
#define BLKXSIZE 32
#define BLKYSIZE 4
#define BLKZSIZE 4

#define Q 19
#define lx 10
#define ly 10
#define lz 5

// for cuda error checking
#define cudaCheckErrors(msg) \
    do { \
        hipError_t __err = hipGetLastError(); \
        if (__err != hipSuccess) { \
            fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
                msg, hipGetErrorString(__err), \
                __FILE__, __LINE__); \
            fprintf(stderr, "*** FAILED - ABORTING\n"); \
            return 1; \
        } \
    } while (0)

template <typename T>
__device__ void swap(T& a, T& b)
{
    T c(a); a = b; b = c;
}

// Build an lx*ly*lz*Q host array as three pointer tiers over one contiguous
// data block, so the payload can be copied to the device with a single memcpy.
template <typename Ttype>
Ttype**** create_4d_harray()
{
    Ttype**** array = new Ttype***[lx];
    Ttype***  tier2 = new Ttype**[lx * ly];
    Ttype**   tier3 = new Ttype*[lx * ly * lz];
    Ttype*    data  = new Ttype[lx * ly * lz * Q];
    for (int i = 0; i < lx; i++) {
        array[i] = tier2 + i * ly;
        for (int j = 0; j < ly; j++) {
            array[i][j] = tier3 + (i * ly + j) * lz;
            for (int k = 0; k < lz; k++) {
                array[i][j][k] = data + ((i * ly + j) * lz + k) * Q;
            }
        }
    }
    return array;
}

template <typename Ttype>
void free_4d_harray(Ttype**** array)
{
    // each tier was allocated as a single block, so free each block exactly once
    delete[] array[0][0][0];  // contiguous data
    delete[] array[0][0];     // tier of Ttype*
    delete[] array[0];        // tier of Ttype**
    delete[] array;
}

template <typename Ttype>
Ttype (*create_4d_darray())[ly][lz][Q]
{
    Ttype (*dptr_4d_array)[ly][lz][Q];
    hipMalloc((void**)&dptr_4d_array, (lx * ly * lz * Q) * sizeof(Ttype));
    return dptr_4d_array;
}

__global__ void gpu_array_swap(int ptr_gpu[][ly][lz][Q])
{
    unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned idy = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned idz = blockIdx.z * blockDim.z + threadIdx.z;
    if ((idx < lx) && (idy < ly) && (idz < lz)) {
        for (size_t i = 1; i <= 9; i++)
            swap(ptr_gpu[idx][idy][idz][i], ptr_gpu[idx][idy][idz][i + 9]);
    }
}

// fill every element of the lx*ly*lz*Q array with a sequential value
void set_array(int**** array)
{
    int m = 0;
    for (int l = 0; l < Q; ++l) {
        for (int k = 0; k < lz; ++k) {
            for (int j = 0; j < ly; ++j) {
                for (int i = 0; i < lx; ++i) {
                    array[i][j][k][l] = ++m;
                }
            }
        }
    }
}

void print_array(int**** array)
{
    for (int i = 0; i < lx; ++i) {
        for (int j = 0; j < ly; ++j) {
            for (int k = 0; k < lz; ++k) {
                for (int l = 0; l < Q; ++l) {
                    std::cout << array[i][j][k][l] << " ";
                    if (l == (Q - 1)) std::cout << std::endl;
                }
            }
        }
    }
}

int main()
{
    int**** host_4d_array = create_4d_harray<int>();
    int (*device_4d_array)[ly][lz][Q] = create_4d_darray<int>();

    const dim3 blockSize(BLKXSIZE, BLKYSIZE, BLKZSIZE);
    const dim3 gridSize(((lx + BLKXSIZE - 1) / BLKXSIZE),
                        ((ly + BLKYSIZE - 1) / BLKYSIZE),
                        ((lz + BLKZSIZE - 1) / BLKZSIZE));

    set_array(host_4d_array);
    print_array(host_4d_array);

    // allocate GPU device buffers
    cudaCheckErrors("Failed to allocate device buffer");
    // copy the contiguous payload, not the host pointer tables
    hipMemcpy(device_4d_array, host_4d_array[0][0][0],
              (lx * ly * lz * Q) * sizeof(int), hipMemcpyHostToDevice);

    // compute result
    hipLaunchKernelGGL(gpu_array_swap, gridSize, blockSize, 0, 0, device_4d_array);
    cudaCheckErrors("Kernel launch failure");

    // copy output data back to host
    hipMemcpy(host_4d_array[0][0][0], device_4d_array,
              (lx * ly * lz * Q) * sizeof(int), hipMemcpyDeviceToHost);
    cudaCheckErrors("CUDA memcpy failure");

    free_4d_harray(host_4d_array);
    hipFree(device_4d_array);
    cudaCheckErrors("hipFree fail");
    return 0;
}
8258fdf5e2f4e1dbf8d471f01c2d46102fc7cd48.cu
#include <iostream>
#include <algorithm>
#include <cstdlib>
#include <ctime>
#include <cuda.h>
#include <stdio.h>
#include <cassert>

// define the chunk sizes that each threadblock will work on
#define BLKXSIZE 32
#define BLKYSIZE 4
#define BLKZSIZE 4

#define Q 19
#define lx 10
#define ly 10
#define lz 5

// for cuda error checking
#define cudaCheckErrors(msg) \
    do { \
        cudaError_t __err = cudaGetLastError(); \
        if (__err != cudaSuccess) { \
            fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
                msg, cudaGetErrorString(__err), \
                __FILE__, __LINE__); \
            fprintf(stderr, "*** FAILED - ABORTING\n"); \
            return 1; \
        } \
    } while (0)

template <typename T>
__device__ void swap(T& a, T& b)
{
    T c(a); a = b; b = c;
}

// Build an lx*ly*lz*Q host array as three pointer tiers over one contiguous
// data block, so the payload can be copied to the device with a single memcpy.
template <typename Ttype>
Ttype**** create_4d_harray()
{
    Ttype**** array = new Ttype***[lx];
    Ttype***  tier2 = new Ttype**[lx * ly];
    Ttype**   tier3 = new Ttype*[lx * ly * lz];
    Ttype*    data  = new Ttype[lx * ly * lz * Q];
    for (int i = 0; i < lx; i++) {
        array[i] = tier2 + i * ly;
        for (int j = 0; j < ly; j++) {
            array[i][j] = tier3 + (i * ly + j) * lz;
            for (int k = 0; k < lz; k++) {
                array[i][j][k] = data + ((i * ly + j) * lz + k) * Q;
            }
        }
    }
    return array;
}

template <typename Ttype>
void free_4d_harray(Ttype**** array)
{
    // each tier was allocated as a single block, so free each block exactly once
    delete[] array[0][0][0];  // contiguous data
    delete[] array[0][0];     // tier of Ttype*
    delete[] array[0];        // tier of Ttype**
    delete[] array;
}

template <typename Ttype>
Ttype (*create_4d_darray())[ly][lz][Q]
{
    Ttype (*dptr_4d_array)[ly][lz][Q];
    cudaMalloc((void**)&dptr_4d_array, (lx * ly * lz * Q) * sizeof(Ttype));
    return dptr_4d_array;
}

__global__ void gpu_array_swap(int ptr_gpu[][ly][lz][Q])
{
    unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned idy = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned idz = blockIdx.z * blockDim.z + threadIdx.z;
    if ((idx < lx) && (idy < ly) && (idz < lz)) {
        for (size_t i = 1; i <= 9; i++)
            swap(ptr_gpu[idx][idy][idz][i], ptr_gpu[idx][idy][idz][i + 9]);
    }
}

// fill every element of the lx*ly*lz*Q array with a sequential value
void set_array(int**** array)
{
    int m = 0;
    for (int l = 0; l < Q; ++l) {
        for (int k = 0; k < lz; ++k) {
            for (int j = 0; j < ly; ++j) {
                for (int i = 0; i < lx; ++i) {
                    array[i][j][k][l] = ++m;
                }
            }
        }
    }
}

void print_array(int**** array)
{
    for (int i = 0; i < lx; ++i) {
        for (int j = 0; j < ly; ++j) {
            for (int k = 0; k < lz; ++k) {
                for (int l = 0; l < Q; ++l) {
                    std::cout << array[i][j][k][l] << " ";
                    if (l == (Q - 1)) std::cout << std::endl;
                }
            }
        }
    }
}

int main()
{
    int**** host_4d_array = create_4d_harray<int>();
    int (*device_4d_array)[ly][lz][Q] = create_4d_darray<int>();

    const dim3 blockSize(BLKXSIZE, BLKYSIZE, BLKZSIZE);
    const dim3 gridSize(((lx + BLKXSIZE - 1) / BLKXSIZE),
                        ((ly + BLKYSIZE - 1) / BLKYSIZE),
                        ((lz + BLKZSIZE - 1) / BLKZSIZE));

    set_array(host_4d_array);
    print_array(host_4d_array);

    // allocate GPU device buffers
    cudaCheckErrors("Failed to allocate device buffer");
    // copy the contiguous payload, not the host pointer tables
    cudaMemcpy(device_4d_array, host_4d_array[0][0][0],
               (lx * ly * lz * Q) * sizeof(int), cudaMemcpyHostToDevice);

    // compute result
    gpu_array_swap<<<gridSize, blockSize>>>(device_4d_array);
    cudaCheckErrors("Kernel launch failure");

    // copy output data back to host
    cudaMemcpy(host_4d_array[0][0][0], device_4d_array,
               (lx * ly * lz * Q) * sizeof(int), cudaMemcpyDeviceToHost);
    cudaCheckErrors("CUDA memcpy failure");

    free_4d_harray(host_4d_array);
    cudaFree(device_4d_array);
    cudaCheckErrors("cudaFree fail");
    return 0;
}
6ba89a69011980073779f0925642b24c90f5f0f8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2019 Stanford, NVIDIA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" Tensor FFModel::dense(std::string name, const Tensor& input, int outDim, ActiMode activation, bool use_bias, Initializer* kernel_initializer, Initializer* bias_initializer) { Linear *li = new Linear(*this, name, input, outDim, activation, use_bias, kernel_initializer, bias_initializer); layers.push_back(li); Parameter kernel, bias; kernel.tensor = li->kernel; kernel.op = li; bias.tensor = li->bias; bias.op = li; parameters.push_back(kernel); parameters.push_back(bias); return li->output; } // Deprecated API -- TO BE REMOVED Tensor FFModel::linear(std::string name, const Tensor& input, int out_dim, ActiMode activation, bool use_bias, Initializer* kernel_initializer, Initializer* bias_initializer) { return dense(name, input, out_dim, activation, use_bias, kernel_initializer, bias_initializer); } Linear::Linear(FFModel& model, const std::string& pcname, const Tensor& _input, int out_dim, ActiMode _activation, bool use_bias, Initializer* kernel_initializer, Initializer* bias_initializer) : Op(pcname, _input), activation(_activation), profiling(model.config.profiling) { assert(_input.numDim == 2); // Retrieve the task indexspace for the op task_is = IndexSpaceT<2>(model.get_or_create_task_is(pcname)); Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; Rect<2> part_rect = runtime->get_index_space_domain(ctx, task_is); int num_par_c = part_rect.hi[0] - part_rect.lo[0] + 1; int num_par_n = part_rect.hi[1] - part_rect.lo[1] + 1; int in_dim = _input.adim[0]; int batch_size = _input.adim[1]; { const int dims[2] = {batch_size, out_dim}; output = model.create_tensor<2>(dims, task_is, DT_FLOAT); } // Create kernel tensor { const int dims[2] = {out_dim, in_dim}; kernel = model.create_weight<2>(dims, task_is, DT_FLOAT, kernel_initializer); } // Create bias tensor if (use_bias) { const int dims[1] = {out_dim}; bias = model.create_weight<1>(dims, task_is, DT_FLOAT, bias_initializer); } // Compute partition bound for input Rect<2> input_rect = runtime->get_index_partition_color_space( ctx, inputs[0].part.get_index_partition()); // Create replica tensor if (num_par_c > 1) { const int dims[3] = {num_par_c, batch_size, in_dim}; replica = model.create_replica<3>(dims, task_is, DT_FLOAT); { Rect<2> extent(Point<2>(0, 0), Point<2>(in_dim-1, batch_size/num_par_n-1)); Transform<2, 2> transform; transform[0][0] = 0; transform[0][1] = 0; transform[1][0] = 0; transform[1][1] = batch_size/num_par_n; IndexPartition ip = runtime->create_partition_by_restriction( ctx, inputs[0].region.get_index_space(), task_is, transform, extent); input_lps[0] = runtime->get_logical_partition( ctx, inputs[0].region, ip); } // Backward uses the same ip as inputs[0] input_grad_lps[0] = inputs[0].part_grad; { IndexSpaceT<2> input_task_is = IndexSpaceT<2>(model.get_or_create_task_is(input_rect)); const coord_t num_parts[2] = 
{input_rect.hi[0] - input_rect.lo[0] + 1, input_rect.hi[1] - input_rect.lo[1] + 1}; Rect<3> extent(Point<3>(0, 0, 0), Point<3>(in_dim/num_parts[0]-1, batch_size/num_parts[1]-1, num_par_c-1)); Transform<3, 2> transform; for (int i = 0; i < 3; i++) for (int j = 0; j < 2; j++) transform[i][j] = 0; transform[0][0] = in_dim / num_parts[0]; transform[1][1] = batch_size / num_parts[1]; IndexPartition ip = runtime->create_partition_by_restriction( ctx, replica.region_grad.get_index_space(), input_task_is, transform, extent); assert(runtime->is_index_partition_disjoint(ctx, ip)); assert(runtime->is_index_partition_complete(ctx, ip)); // Note we use replica.part to save how to partition the replica // to compute input_grad_lps replica.part = runtime->get_logical_partition( ctx, replica.region_grad, ip); } } else { if (input_rect == part_rect) { input_lps[0] = inputs[0].part; input_grad_lps[0] = inputs[0].part_grad; } else { Rect<2> extent(Point<2>(0,0), Point<2>(in_dim-1,batch_size/num_par_n-1)); Transform<2, 2> transform; transform[0][0] = 0; transform[0][1] = 0; transform[1][0] = 0; transform[1][1] = batch_size / num_par_n; IndexPartition ip = runtime->create_partition_by_restriction( ctx, inputs[0].region.get_index_space(), task_is, transform, extent); assert(runtime->is_index_partition_disjoint(ctx, ip)); assert(runtime->is_index_partition_complete(ctx, ip)); input_lps[0] = runtime->get_logical_partition( ctx, inputs[0].region, ip); input_grad_lps[0] = runtime->get_logical_partition( ctx, inputs[0].region_grad, ip); } } } /* regions[0](I): input regions[1](O): output regions[2]: replica regions[3](I): kernel regions[4](I): bias */ OpMeta* Linear::init_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 4); assert(task->regions.size() == 4); const Linear* linear = (Linear*) task->args; FFHandler handle = *((const FFHandler*) task->local_args); TensorAccessorR<float, 2> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, 2> acc_output( regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/); TensorAccessorR<float, 2> acc_kernel( regions[2], task->regions[2], FID_DATA, ctx, runtime); TensorAccessorR<float, 1> acc_bias( regions[3], task->regions[3], FID_DATA, ctx, runtime); int in_dim = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1; int out_dim = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1; int batch_size = acc_input.rect.hi[1] - acc_input.rect.lo[1] + 1; printf("init linear (input): in_dim(%d) out_dim(%d) batch_size(%d)\n", in_dim, out_dim, batch_size); LinearMeta* m = new LinearMeta(handle); float* dram_one_ptr = (float *) malloc(sizeof(float) * batch_size); for (int i = 0; i < batch_size; i++) dram_one_ptr[i] = 1.0f; float* fb_one_ptr; checkCUDA(hipMalloc(&fb_one_ptr, sizeof(float) * batch_size)); checkCUDA(hipMemcpy(fb_one_ptr, dram_one_ptr, sizeof(float) * batch_size, hipMemcpyHostToDevice)); m->one_ptr = (const float*) fb_one_ptr; if (linear->activation != AC_MODE_NONE) { cudnnActivationMode_t mode; switch (linear->activation) { case AC_MODE_RELU: mode = CUDNN_ACTIVATION_RELU; break; case AC_MODE_SIGMOID: mode = CUDNN_ACTIVATION_SIGMOID; break; default: // Unsupported activation mode assert(false); } checkCUDNN(cudnnCreateActivationDescriptor(&m->actiDesc)); checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, mode, CUDNN_PROPAGATE_NAN, 0.0)); checkCUDNN(cudnnCreateTensorDescriptor(&m->outputTensor)); checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor, 
CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch_size, out_dim, 1, 1)); } return m; } void Linear::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<2> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<2> it(rect); it(); it++) { FFHandler handle = ff.handlers[idx++]; argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); } IndexLauncher launcher(LINEAR_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(Linear)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(output.part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, output.region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(kernel.part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, kernel.region)); launcher.add_field(2, FID_DATA); launcher.add_region_requirement( RegionRequirement(bias.part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, bias.region)); launcher.add_field(3, FID_DATA); FutureMap fm = runtime->execute_index_space(ctx, launcher); fm.wait_all_results(); idx = 0; for (PointInRectIterator<2> it(rect); it(); it++) { meta[idx++] = fm.get_result<OpMeta*>(*it); } } /* regions[0](I): input regions[1](O): output regions[2](I): kernel regions[3](I): bias */ __host__ void Linear::forward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 4); assert(task->regions.size() == 4); float alpha = 1.0f, beta = 0.0f; const Linear* linear = (Linear*) task->args; const LinearMeta* m = *((LinearMeta**) task->local_args); TensorAccessorR<float, 2> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, 2> acc_output( regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/); TensorAccessorR<float, 2> acc_kernel( regions[2], task->regions[2], FID_DATA, ctx, runtime); TensorAccessorR<float, 1> acc_bias( regions[3], task->regions[3], FID_DATA, ctx, runtime); int in_dim = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1; int out_dim = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1; int batch_size = acc_input.rect.hi[1] - acc_input.rect.lo[1] + 1; assert(acc_output.rect.volume() == out_dim * batch_size); assert(acc_kernel.rect.volume() == in_dim * out_dim); assert(acc_bias.rect.volume() == out_dim); hipEvent_t t_start, t_end; if (linear->profiling) { hipEventCreate(&t_start); hipEventCreate(&t_end); hipEventRecord(t_start); } hipStream_t stream; checkCUDA(hipStreamCreate(&stream)); checkCUDA(hipblasSetStream(m->handle.blas, stream)); checkCUDA(hipblasSgemm(m->handle.blas, HIPBLAS_OP_T, HIPBLAS_OP_N, out_dim, batch_size, in_dim, &alpha, acc_kernel.ptr, in_dim, acc_input.ptr, in_dim, &beta, acc_output.ptr, out_dim)); checkCUDA(hipblasSgemm(m->handle.blas, HIPBLAS_OP_T, HIPBLAS_OP_N, out_dim, batch_size, 1, &alpha, acc_bias.ptr, 1, m->one_ptr, 1, &alpha, acc_output.ptr, out_dim)); if (linear->activation != AC_MODE_NONE) { checkCUDNN(cudnnActivationForward(m->handle.dnn, m->actiDesc, &alpha, m->outputTensor, acc_output.ptr, &beta, m->outputTensor, acc_output.ptr)); } if (linear->profiling) { hipEventRecord(t_end); checkCUDA(hipEventSynchronize(t_end)); float elapsed = 0; checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end)); 
hipEventDestroy(t_start); hipEventDestroy(t_end); printf("Linear forward time = %.2lfms\n", elapsed); print_tensor<2, float>(acc_input.ptr, acc_input.rect, "[Linear:forward:input]"); print_tensor<2, float>(acc_kernel.ptr, acc_kernel.rect, "[Linear:forward:kernel]"); print_tensor<2, float>(acc_output.ptr, acc_output.rect, "[Linear:forward:output]"); checkCUDA(hipDeviceSynchronize()); } } void Linear::forward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<2> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<2> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(LINEAR_FWD_TASK_ID, task_is, TaskArgument(this, sizeof(Linear)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(output.part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, output.region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(kernel.part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, kernel.region)); launcher.add_field(2, FID_DATA); launcher.add_region_requirement( RegionRequirement(bias.part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, bias.region)); launcher.add_field(3, FID_DATA); runtime->execute_index_space(ctx, launcher); } __global__ void sigmoid_backward(float *grad_ptr, const float *output, int n) { CUDA_KERNEL_LOOP(i, n) { grad_ptr[i] = grad_ptr[i] * output[i] * (1 - output[i]); } } /* regions[0](I): input regions[1](O): replica_grad or input_grad regions[2](I): output regions[3](I/O): output_grad regions[4](I): filter regions[5](O): filter_grad regions[6](O): bias_grad */ __host__ void Linear::backward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 7); assert(task->regions.size() == 7); float alpha = 1.0f, beta = 0.0f; const Linear* linear = (Linear*) task->args; const LinearMeta* m = *((LinearMeta**) task->local_args); float* input_grad = NULL; TensorAccessorR<float, 2> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorR<float, 2> acc_output( regions[2], task->regions[2], FID_DATA, ctx, runtime); int in_dim = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1; int batch_size = acc_input.rect.hi[1] - acc_input.rect.lo[1] + 1; int out_dim = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1; Domain domain = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); if (domain.get_dim() == 3) { TensorAccessorW<float, 3> acc_replica_grad( regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/); assert(acc_replica_grad.rect.volume() == in_dim * batch_size); input_grad = acc_replica_grad.ptr; } else { TensorAccessorW<float, 2> acc_replica_grad( regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/); assert(acc_replica_grad.rect.volume() == in_dim * batch_size); input_grad = acc_replica_grad.ptr; } TensorAccessorW<float, 2> acc_output_grad( regions[3], task->regions[3], FID_DATA, ctx, runtime, true/*readOutput*/); TensorAccessorR<float, 2> acc_kernel( regions[4], task->regions[4], FID_DATA, ctx, runtime); TensorAccessorW<float, 2> acc_kernel_grad( regions[5], task->regions[5], 
FID_DATA, ctx, runtime, false/*readOutput*/); TensorAccessorW<float, 1> acc_bias_grad( regions[6], task->regions[6], FID_DATA, ctx, runtime, false/*readOutput*/); // make sure the sizes match assert(acc_output.rect.volume() == out_dim * batch_size); assert(acc_output_grad.rect.volume() == out_dim * batch_size); assert(acc_kernel.rect.volume() == in_dim * out_dim); assert(acc_kernel_grad.rect.volume() == in_dim * out_dim); assert(acc_bias_grad.rect.volume() == out_dim); hipEvent_t t_start, t_end; if (linear->profiling) { hipEventCreate(&t_start); hipEventCreate(&t_end); hipEventRecord(t_start); } hipStream_t stream; checkCUDA(hipStreamCreate(&stream)); checkCUDA(hipblasSetStream(m->handle.blas, stream)); if (linear->activation == AC_MODE_RELU) { hipLaunchKernelGGL(( reluBackward), dim3(GET_BLOCKS(acc_output.rect.volume())), dim3(CUDA_NUM_THREADS), 0, 0, acc_output_grad.ptr, acc_output.ptr, acc_output.rect.volume()); } else if (linear->activation == AC_MODE_SIGMOID) { hipLaunchKernelGGL(( sigmoid_backward), dim3(GET_BLOCKS(acc_output.rect.volume())), dim3(CUDA_NUM_THREADS), 0, 0, acc_output_grad.ptr, acc_output.ptr, acc_output.rect.volume()); } else { // TODO: only support relu and sigmoid for now assert(linear->activation == AC_MODE_NONE); } // Compute weight gradient checkCUDA(hipblasSgemm(m->handle.blas, HIPBLAS_OP_N, HIPBLAS_OP_T, in_dim, out_dim, batch_size, &alpha, acc_input.ptr, in_dim, acc_output_grad.ptr, out_dim, &beta, acc_kernel_grad.ptr, in_dim)); // Compute bias gradient checkCUDA(hipblasSgemv(m->handle.blas, HIPBLAS_OP_N, out_dim, batch_size, &alpha, acc_output_grad.ptr, out_dim, m->one_ptr, 1, &beta, acc_bias_grad.ptr, 1)); // Compute data gradient checkCUDA(hipblasSgemm(m->handle.blas, HIPBLAS_OP_N, HIPBLAS_OP_N, in_dim, batch_size, out_dim, &alpha, acc_kernel.ptr, in_dim, acc_output_grad.ptr, out_dim, &beta, input_grad, in_dim)); if (linear->profiling) { hipEventRecord(t_end); checkCUDA(hipEventSynchronize(t_end)); float elapsed = 0; checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end)); hipEventDestroy(t_start); hipEventDestroy(t_end); printf("Linear backward time = %.2lfms\n", elapsed); print_tensor<2, float>(acc_output_grad.ptr, acc_output_grad.rect, "[Linear:backward:output_grad]"); print_tensor<2, float>(acc_kernel_grad.ptr, acc_kernel_grad.rect, "[Linear:backward:kernel_grad]"); print_tensor<1, float>(acc_bias_grad.ptr, acc_bias_grad.rect, "[Linear:backward:bias_grad]"); print_tensor<2, float>(input_grad, acc_input.rect, "[Linear:backward:input_grad]"); checkCUDA(hipDeviceSynchronize()); } } /* regions[0](O): input_grad regions[1](I): replicas */ __host__ void Linear::backward2_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { float alpha = 1.0f; const LinearMeta* m = *((LinearMeta**) task->local_args); TensorAccessorW<float, 2> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime, false/*readOutput*/); TensorAccessorR<float, 3> acc_replica( regions[1], task->regions[1], FID_DATA, ctx, runtime); assert(acc_input.rect.hi[0] == acc_replica.rect.hi[0]); assert(acc_input.rect.lo[0] == acc_replica.rect.lo[0]); assert(acc_input.rect.hi[1] == acc_replica.rect.hi[1]); assert(acc_input.rect.lo[1] == acc_replica.rect.lo[1]); hipStream_t stream; checkCUDA(hipStreamCreate(&stream)); checkCUDA(hipblasSetStream(m->handle.blas, stream)); int num_replica = acc_replica.rect.hi[2] - acc_replica.rect.lo[2] + 1; const float *replica_ptr = acc_replica.ptr; for (int i = 1; i < num_replica; i++) { 
checkCUDA(hipblasSaxpy(m->handle.blas, acc_input.rect.volume(), &alpha, replica_ptr, 1, acc_input.ptr, 1)); replica_ptr += acc_input.rect.volume(); } } void Linear::backward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<2> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<2> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } { IndexLauncher launcher(LINEAR_BWD_TASK_ID, task_is, TaskArgument(this, sizeof(Linear)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); // regions[0](I): input launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); // regions[1](O): replica_grad if (replica.region != LogicalRegion::NO_REGION) { launcher.add_region_requirement( RegionRequirement(replica.part_grad, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, replica.region_grad)); launcher.add_field(1, FID_DATA); } else { launcher.add_region_requirement( RegionRequirement(input_grad_lps[0], 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(1, FID_DATA); } // regions[2](I): output launcher.add_region_requirement( RegionRequirement(output.part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, output.region)); launcher.add_field(2, FID_DATA); // regions[3](I/O): output_grad launcher.add_region_requirement( RegionRequirement(output.part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, output.region_grad)); launcher.add_field(3, FID_DATA); // regions[4](I): filter launcher.add_region_requirement( RegionRequirement(kernel.part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, kernel.region)); launcher.add_field(4, FID_DATA); // regions[5](O): filter_grad launcher.add_region_requirement( RegionRequirement(kernel.part_grad, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, kernel.region_grad)); launcher.add_field(5, FID_DATA); // regions[6](O): bias_grad launcher.add_region_requirement( RegionRequirement(bias.part_grad, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, bias.region_grad)); launcher.add_field(6, FID_DATA); runtime->execute_index_space(ctx, launcher); } if (replica.region != LogicalRegion::NO_REGION) { // We aggregate parameters from replica tensor to input tensor // Note we use input's task_is to reduce extra data transfers Rect<2> input_rect = runtime->get_index_partition_color_space( ctx, inputs[0].part_grad.get_index_partition()); IndexSpaceT<2> input_task_is = IndexSpaceT<2>(ff.get_task_is(input_rect)); IndexLauncher launcher(LINEAR_BWD2_TASK_ID, task_is, TaskArgument(this, sizeof(Linear)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_grad_lps[0], 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(0, FID_DATA); // Note that replica.part save's a partition of replica.region_grad launcher.add_region_requirement( RegionRequirement(replica.part, 0/*partition id*/, READ_ONLY, EXCLUSIVE, replica.region_grad)); launcher.add_field(1, FID_DATA); runtime->execute_index_space(ctx, launcher); } }
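// A plain CPU sketch (an illustrative addition, not part of the file) of what
// the two hipblasSgemm calls in Linear::forward_task compute, assuming
// column-major storage as in BLAS: kernel is (in_dim x out_dim) with leading
// dimension in_dim, input is (in_dim x batch_size), and
// output(o, b) = sum_i kernel(i, o) * input(i, b) + bias(o).
void linear_forward_reference(const float* kernel, const float* input,
                              const float* bias, float* output,
                              int in_dim, int out_dim, int batch_size)
{
    for (int b = 0; b < batch_size; ++b)
        for (int o = 0; o < out_dim; ++o) {
            float acc = bias[o];              // second Sgemm: bias * ones^T
            for (int i = 0; i < in_dim; ++i)  // first Sgemm: W^T * X
                acc += kernel[i + o * in_dim] * input[i + b * in_dim];
            output[o + b * out_dim] = acc;
        }
}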
6ba89a69011980073779f0925642b24c90f5f0f8.cu
/* Copyright 2019 Stanford, NVIDIA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" Tensor FFModel::dense(std::string name, const Tensor& input, int outDim, ActiMode activation, bool use_bias, Initializer* kernel_initializer, Initializer* bias_initializer) { Linear *li = new Linear(*this, name, input, outDim, activation, use_bias, kernel_initializer, bias_initializer); layers.push_back(li); Parameter kernel, bias; kernel.tensor = li->kernel; kernel.op = li; bias.tensor = li->bias; bias.op = li; parameters.push_back(kernel); parameters.push_back(bias); return li->output; } // Deprecated API -- TO BE REMOVED Tensor FFModel::linear(std::string name, const Tensor& input, int out_dim, ActiMode activation, bool use_bias, Initializer* kernel_initializer, Initializer* bias_initializer) { return dense(name, input, out_dim, activation, use_bias, kernel_initializer, bias_initializer); } Linear::Linear(FFModel& model, const std::string& pcname, const Tensor& _input, int out_dim, ActiMode _activation, bool use_bias, Initializer* kernel_initializer, Initializer* bias_initializer) : Op(pcname, _input), activation(_activation), profiling(model.config.profiling) { assert(_input.numDim == 2); // Retrieve the task indexspace for the op task_is = IndexSpaceT<2>(model.get_or_create_task_is(pcname)); Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; Rect<2> part_rect = runtime->get_index_space_domain(ctx, task_is); int num_par_c = part_rect.hi[0] - part_rect.lo[0] + 1; int num_par_n = part_rect.hi[1] - part_rect.lo[1] + 1; int in_dim = _input.adim[0]; int batch_size = _input.adim[1]; { const int dims[2] = {batch_size, out_dim}; output = model.create_tensor<2>(dims, task_is, DT_FLOAT); } // Create kernel tensor { const int dims[2] = {out_dim, in_dim}; kernel = model.create_weight<2>(dims, task_is, DT_FLOAT, kernel_initializer); } // Create bias tensor if (use_bias) { const int dims[1] = {out_dim}; bias = model.create_weight<1>(dims, task_is, DT_FLOAT, bias_initializer); } // Compute partition bound for input Rect<2> input_rect = runtime->get_index_partition_color_space( ctx, inputs[0].part.get_index_partition()); // Create replica tensor if (num_par_c > 1) { const int dims[3] = {num_par_c, batch_size, in_dim}; replica = model.create_replica<3>(dims, task_is, DT_FLOAT); { Rect<2> extent(Point<2>(0, 0), Point<2>(in_dim-1, batch_size/num_par_n-1)); Transform<2, 2> transform; transform[0][0] = 0; transform[0][1] = 0; transform[1][0] = 0; transform[1][1] = batch_size/num_par_n; IndexPartition ip = runtime->create_partition_by_restriction( ctx, inputs[0].region.get_index_space(), task_is, transform, extent); input_lps[0] = runtime->get_logical_partition( ctx, inputs[0].region, ip); } // Backward uses the same ip as inputs[0] input_grad_lps[0] = inputs[0].part_grad; { IndexSpaceT<2> input_task_is = IndexSpaceT<2>(model.get_or_create_task_is(input_rect)); const coord_t num_parts[2] = {input_rect.hi[0] - input_rect.lo[0] + 1, input_rect.hi[1] - input_rect.lo[1] + 1}; Rect<3> 
extent(Point<3>(0, 0, 0), Point<3>(in_dim/num_parts[0]-1, batch_size/num_parts[1]-1, num_par_c-1)); Transform<3, 2> transform; for (int i = 0; i < 3; i++) for (int j = 0; j < 2; j++) transform[i][j] = 0; transform[0][0] = in_dim / num_parts[0]; transform[1][1] = batch_size / num_parts[1]; IndexPartition ip = runtime->create_partition_by_restriction( ctx, replica.region_grad.get_index_space(), input_task_is, transform, extent); assert(runtime->is_index_partition_disjoint(ctx, ip)); assert(runtime->is_index_partition_complete(ctx, ip)); // Note we use replica.part to save how to partition the replica // to compute input_grad_lps replica.part = runtime->get_logical_partition( ctx, replica.region_grad, ip); } } else { if (input_rect == part_rect) { input_lps[0] = inputs[0].part; input_grad_lps[0] = inputs[0].part_grad; } else { Rect<2> extent(Point<2>(0,0), Point<2>(in_dim-1,batch_size/num_par_n-1)); Transform<2, 2> transform; transform[0][0] = 0; transform[0][1] = 0; transform[1][0] = 0; transform[1][1] = batch_size / num_par_n; IndexPartition ip = runtime->create_partition_by_restriction( ctx, inputs[0].region.get_index_space(), task_is, transform, extent); assert(runtime->is_index_partition_disjoint(ctx, ip)); assert(runtime->is_index_partition_complete(ctx, ip)); input_lps[0] = runtime->get_logical_partition( ctx, inputs[0].region, ip); input_grad_lps[0] = runtime->get_logical_partition( ctx, inputs[0].region_grad, ip); } } } /* regions[0](I): input regions[1](O): output regions[2]: replica regions[3](I): kernel regions[4](I): bias */ OpMeta* Linear::init_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 4); assert(task->regions.size() == 4); const Linear* linear = (Linear*) task->args; FFHandler handle = *((const FFHandler*) task->local_args); TensorAccessorR<float, 2> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, 2> acc_output( regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/); TensorAccessorR<float, 2> acc_kernel( regions[2], task->regions[2], FID_DATA, ctx, runtime); TensorAccessorR<float, 1> acc_bias( regions[3], task->regions[3], FID_DATA, ctx, runtime); int in_dim = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1; int out_dim = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1; int batch_size = acc_input.rect.hi[1] - acc_input.rect.lo[1] + 1; printf("init linear (input): in_dim(%d) out_dim(%d) batch_size(%d)\n", in_dim, out_dim, batch_size); LinearMeta* m = new LinearMeta(handle); float* dram_one_ptr = (float *) malloc(sizeof(float) * batch_size); for (int i = 0; i < batch_size; i++) dram_one_ptr[i] = 1.0f; float* fb_one_ptr; checkCUDA(cudaMalloc(&fb_one_ptr, sizeof(float) * batch_size)); checkCUDA(cudaMemcpy(fb_one_ptr, dram_one_ptr, sizeof(float) * batch_size, cudaMemcpyHostToDevice)); m->one_ptr = (const float*) fb_one_ptr; if (linear->activation != AC_MODE_NONE) { cudnnActivationMode_t mode; switch (linear->activation) { case AC_MODE_RELU: mode = CUDNN_ACTIVATION_RELU; break; case AC_MODE_SIGMOID: mode = CUDNN_ACTIVATION_SIGMOID; break; default: // Unsupported activation mode assert(false); } checkCUDNN(cudnnCreateActivationDescriptor(&m->actiDesc)); checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, mode, CUDNN_PROPAGATE_NAN, 0.0)); checkCUDNN(cudnnCreateTensorDescriptor(&m->outputTensor)); checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch_size, out_dim, 1, 1)); } return m; } void 
Linear::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<2> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<2> it(rect); it(); it++) { FFHandler handle = ff.handlers[idx++]; argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); } IndexLauncher launcher(LINEAR_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(Linear)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(output.part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, output.region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(kernel.part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, kernel.region)); launcher.add_field(2, FID_DATA); launcher.add_region_requirement( RegionRequirement(bias.part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, bias.region)); launcher.add_field(3, FID_DATA); FutureMap fm = runtime->execute_index_space(ctx, launcher); fm.wait_all_results(); idx = 0; for (PointInRectIterator<2> it(rect); it(); it++) { meta[idx++] = fm.get_result<OpMeta*>(*it); } } /* regions[0](I): input regions[1](O): output regions[2](I): kernel regions[3](I): bias */ __host__ void Linear::forward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 4); assert(task->regions.size() == 4); float alpha = 1.0f, beta = 0.0f; const Linear* linear = (Linear*) task->args; const LinearMeta* m = *((LinearMeta**) task->local_args); TensorAccessorR<float, 2> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, 2> acc_output( regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/); TensorAccessorR<float, 2> acc_kernel( regions[2], task->regions[2], FID_DATA, ctx, runtime); TensorAccessorR<float, 1> acc_bias( regions[3], task->regions[3], FID_DATA, ctx, runtime); int in_dim = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1; int out_dim = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1; int batch_size = acc_input.rect.hi[1] - acc_input.rect.lo[1] + 1; assert(acc_output.rect.volume() == out_dim * batch_size); assert(acc_kernel.rect.volume() == in_dim * out_dim); assert(acc_bias.rect.volume() == out_dim); cudaEvent_t t_start, t_end; if (linear->profiling) { cudaEventCreate(&t_start); cudaEventCreate(&t_end); cudaEventRecord(t_start); } cudaStream_t stream; checkCUDA(cudaStreamCreate(&stream)); checkCUDA(cublasSetStream(m->handle.blas, stream)); checkCUDA(cublasSgemm(m->handle.blas, CUBLAS_OP_T, CUBLAS_OP_N, out_dim, batch_size, in_dim, &alpha, acc_kernel.ptr, in_dim, acc_input.ptr, in_dim, &beta, acc_output.ptr, out_dim)); checkCUDA(cublasSgemm(m->handle.blas, CUBLAS_OP_T, CUBLAS_OP_N, out_dim, batch_size, 1, &alpha, acc_bias.ptr, 1, m->one_ptr, 1, &alpha, acc_output.ptr, out_dim)); if (linear->activation != AC_MODE_NONE) { checkCUDNN(cudnnActivationForward(m->handle.dnn, m->actiDesc, &alpha, m->outputTensor, acc_output.ptr, &beta, m->outputTensor, acc_output.ptr)); } if (linear->profiling) { cudaEventRecord(t_end); checkCUDA(cudaEventSynchronize(t_end)); float elapsed = 0; checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end)); cudaEventDestroy(t_start); cudaEventDestroy(t_end); printf("Linear forward time = 
%.2lfms\n", elapsed); print_tensor<2, float>(acc_input.ptr, acc_input.rect, "[Linear:forward:input]"); print_tensor<2, float>(acc_kernel.ptr, acc_kernel.rect, "[Linear:forward:kernel]"); print_tensor<2, float>(acc_output.ptr, acc_output.rect, "[Linear:forward:output]"); checkCUDA(cudaDeviceSynchronize()); } } void Linear::forward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<2> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<2> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(LINEAR_FWD_TASK_ID, task_is, TaskArgument(this, sizeof(Linear)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(output.part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, output.region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(kernel.part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, kernel.region)); launcher.add_field(2, FID_DATA); launcher.add_region_requirement( RegionRequirement(bias.part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, bias.region)); launcher.add_field(3, FID_DATA); runtime->execute_index_space(ctx, launcher); } __global__ void sigmoid_backward(float *grad_ptr, const float *output, int n) { CUDA_KERNEL_LOOP(i, n) { grad_ptr[i] = grad_ptr[i] * output[i] * (1 - output[i]); } } /* regions[0](I): input regions[1](O): replica_grad or input_grad regions[2](I): output regions[3](I/O): output_grad regions[4](I): filter regions[5](O): filter_grad regions[6](O): bias_grad */ __host__ void Linear::backward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 7); assert(task->regions.size() == 7); float alpha = 1.0f, beta = 0.0f; const Linear* linear = (Linear*) task->args; const LinearMeta* m = *((LinearMeta**) task->local_args); float* input_grad = NULL; TensorAccessorR<float, 2> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorR<float, 2> acc_output( regions[2], task->regions[2], FID_DATA, ctx, runtime); int in_dim = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1; int batch_size = acc_input.rect.hi[1] - acc_input.rect.lo[1] + 1; int out_dim = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1; Domain domain = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); if (domain.get_dim() == 3) { TensorAccessorW<float, 3> acc_replica_grad( regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/); assert(acc_replica_grad.rect.volume() == in_dim * batch_size); input_grad = acc_replica_grad.ptr; } else { TensorAccessorW<float, 2> acc_replica_grad( regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/); assert(acc_replica_grad.rect.volume() == in_dim * batch_size); input_grad = acc_replica_grad.ptr; } TensorAccessorW<float, 2> acc_output_grad( regions[3], task->regions[3], FID_DATA, ctx, runtime, true/*readOutput*/); TensorAccessorR<float, 2> acc_kernel( regions[4], task->regions[4], FID_DATA, ctx, runtime); TensorAccessorW<float, 2> acc_kernel_grad( regions[5], task->regions[5], FID_DATA, ctx, runtime, false/*readOutput*/); TensorAccessorW<float, 1> 
acc_bias_grad( regions[6], task->regions[6], FID_DATA, ctx, runtime, false/*readOutput*/); // make sure the sizes match assert(acc_output.rect.volume() == out_dim * batch_size); assert(acc_output_grad.rect.volume() == out_dim * batch_size); assert(acc_kernel.rect.volume() == in_dim * out_dim); assert(acc_kernel_grad.rect.volume() == in_dim * out_dim); assert(acc_bias_grad.rect.volume() == out_dim); cudaEvent_t t_start, t_end; if (linear->profiling) { cudaEventCreate(&t_start); cudaEventCreate(&t_end); cudaEventRecord(t_start); } cudaStream_t stream; checkCUDA(cudaStreamCreate(&stream)); checkCUDA(cublasSetStream(m->handle.blas, stream)); if (linear->activation == AC_MODE_RELU) { reluBackward<<<GET_BLOCKS(acc_output.rect.volume()), CUDA_NUM_THREADS>>>( acc_output_grad.ptr, acc_output.ptr, acc_output.rect.volume()); } else if (linear->activation == AC_MODE_SIGMOID) { sigmoid_backward<<<GET_BLOCKS(acc_output.rect.volume()), CUDA_NUM_THREADS>>>( acc_output_grad.ptr, acc_output.ptr, acc_output.rect.volume()); } else { // TODO: only support relu and sigmoid for now assert(linear->activation == AC_MODE_NONE); } // Compute weight gradient checkCUDA(cublasSgemm(m->handle.blas, CUBLAS_OP_N, CUBLAS_OP_T, in_dim, out_dim, batch_size, &alpha, acc_input.ptr, in_dim, acc_output_grad.ptr, out_dim, &beta, acc_kernel_grad.ptr, in_dim)); // Compute bias gradient checkCUDA(cublasSgemv(m->handle.blas, CUBLAS_OP_N, out_dim, batch_size, &alpha, acc_output_grad.ptr, out_dim, m->one_ptr, 1, &beta, acc_bias_grad.ptr, 1)); // Compute data gradient checkCUDA(cublasSgemm(m->handle.blas, CUBLAS_OP_N, CUBLAS_OP_N, in_dim, batch_size, out_dim, &alpha, acc_kernel.ptr, in_dim, acc_output_grad.ptr, out_dim, &beta, input_grad, in_dim)); if (linear->profiling) { cudaEventRecord(t_end); checkCUDA(cudaEventSynchronize(t_end)); float elapsed = 0; checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end)); cudaEventDestroy(t_start); cudaEventDestroy(t_end); printf("Linear backward time = %.2lfms\n", elapsed); print_tensor<2, float>(acc_output_grad.ptr, acc_output_grad.rect, "[Linear:backward:output_grad]"); print_tensor<2, float>(acc_kernel_grad.ptr, acc_kernel_grad.rect, "[Linear:backward:kernel_grad]"); print_tensor<1, float>(acc_bias_grad.ptr, acc_bias_grad.rect, "[Linear:backward:bias_grad]"); print_tensor<2, float>(input_grad, acc_input.rect, "[Linear:backward:input_grad]"); checkCUDA(cudaDeviceSynchronize()); } } /* regions[0](O): input_grad regions[1](I): replicas */ __host__ void Linear::backward2_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { float alpha = 1.0f; const LinearMeta* m = *((LinearMeta**) task->local_args); TensorAccessorW<float, 2> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime, false/*readOutput*/); TensorAccessorR<float, 3> acc_replica( regions[1], task->regions[1], FID_DATA, ctx, runtime); assert(acc_input.rect.hi[0] == acc_replica.rect.hi[0]); assert(acc_input.rect.lo[0] == acc_replica.rect.lo[0]); assert(acc_input.rect.hi[1] == acc_replica.rect.hi[1]); assert(acc_input.rect.lo[1] == acc_replica.rect.lo[1]); cudaStream_t stream; checkCUDA(cudaStreamCreate(&stream)); checkCUDA(cublasSetStream(m->handle.blas, stream)); int num_replica = acc_replica.rect.hi[2] - acc_replica.rect.lo[2] + 1; const float *replica_ptr = acc_replica.ptr; for (int i = 1; i < num_replica; i++) { checkCUDA(cublasSaxpy(m->handle.blas, acc_input.rect.volume(), &alpha, replica_ptr, 1, acc_input.ptr, 1)); replica_ptr += acc_input.rect.volume(); } } void 
Linear::backward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<2> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<2> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } { IndexLauncher launcher(LINEAR_BWD_TASK_ID, task_is, TaskArgument(this, sizeof(Linear)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); // regions[0](I): input launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); // regions[1](O): replica_grad if (replica.region != LogicalRegion::NO_REGION) { launcher.add_region_requirement( RegionRequirement(replica.part_grad, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, replica.region_grad)); launcher.add_field(1, FID_DATA); } else { launcher.add_region_requirement( RegionRequirement(input_grad_lps[0], 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(1, FID_DATA); } // regions[2](I): output launcher.add_region_requirement( RegionRequirement(output.part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, output.region)); launcher.add_field(2, FID_DATA); // regions[3](I/O): output_grad launcher.add_region_requirement( RegionRequirement(output.part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, output.region_grad)); launcher.add_field(3, FID_DATA); // regions[4](I): filter launcher.add_region_requirement( RegionRequirement(kernel.part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, kernel.region)); launcher.add_field(4, FID_DATA); // regions[5](O): filter_grad launcher.add_region_requirement( RegionRequirement(kernel.part_grad, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, kernel.region_grad)); launcher.add_field(5, FID_DATA); // regions[6](O): bias_grad launcher.add_region_requirement( RegionRequirement(bias.part_grad, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, bias.region_grad)); launcher.add_field(6, FID_DATA); runtime->execute_index_space(ctx, launcher); } if (replica.region != LogicalRegion::NO_REGION) { // We aggregate parameters from replica tensor to input tensor // Note we use input's task_is to reduce extra data transfers Rect<2> input_rect = runtime->get_index_partition_color_space( ctx, inputs[0].part_grad.get_index_partition()); IndexSpaceT<2> input_task_is = IndexSpaceT<2>(ff.get_task_is(input_rect)); IndexLauncher launcher(LINEAR_BWD2_TASK_ID, task_is, TaskArgument(this, sizeof(Linear)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_grad_lps[0], 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(0, FID_DATA); // Note that replica.part save's a partition of replica.region_grad launcher.add_region_requirement( RegionRequirement(replica.part, 0/*partition id*/, READ_ONLY, EXCLUSIVE, replica.region_grad)); launcher.add_field(1, FID_DATA); runtime->execute_index_space(ctx, launcher); } }
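// The sigmoid_backward kernel above uses the identity
// d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)), which lets the gradient be
// formed from the saved *output* alone. A host-side sketch of the same update
// (illustrative, not part of the file):
void sigmoid_backward_reference(float* grad, const float* output, int n)
{
    for (int i = 0; i < n; ++i)
        grad[i] *= output[i] * (1.0f - output[i]); // chain rule through sigmoid
}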
687c86f988ace5b515656639a3107244622a1768.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @generated from zcompact.cu normal z -> d, Fri Jul 18 17:34:28 2014 @author Stan Tomov */ #include "common_magma.h" #include <assert.h> #define NB 64 /* ===================================================================== Matrix is m x n, and is divided into block rows, each NB x n. Each CUDA block has NB threads to handle one block row. Each thread handles one row, iterating across all columns. */ __global__ void dcompact_kernel( int m, int n, double *dA, int ldda, double *dnorms, double tol, magma_index_t *active, magma_index_t *cBlock) { // dA is processed across row i (by the current thread) int i = blockIdx.x*blockDim.x + threadIdx.x; int cBlockSize = 0; if ( i < m ) { dA += i; for(int j = 0; j<n; j++){ if (dnorms[j] > tol && active[j]){ dA[ldda*cBlockSize] = dA[ldda*j]; cBlockSize++; } else if (i==0) active[j] = 0; } } if (i==0) *cBlock = cBlockSize; } __global__ void dcompactactive_kernel( int m, int n, double *dA, int ldda, magma_index_t *active) { // dA is processed across row i (by the current thread) int i = blockIdx.x*blockDim.x + threadIdx.x; int cBlockSize = 0; if ( i < m ) { dA += i; for(int j = 0; j<n; j++){ if (active[j]){ dA[ldda*cBlockSize] = dA[ldda*j]; cBlockSize++; } } } } /* ===================================================================== */ /** Purpose ------- ZCOMPACT takes a set of n vectors of size m (in dA) and their norms and compacts them into the cBlock size<=n vectors that have norms > tol. The active mask array has 1 or 0, showing if a vector remained or not in the compacted resulting set of vectors. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in,out] dA COMPLEX DOUBLE PRECISION array, dimension (LDDA,N) The m by n matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[in] dnorms DOUBLE PRECISION array, dimension N The norms of the N vectors in dA @param[in] tol DOUBLE PRECISON The tolerance value used in the criteria to compact or not. @param[out] active INTEGER array, dimension N A mask of 1s and 0s showing if a vector remains or has been removed @param[out] cBlock magma_index_t* The number of vectors that remain in dA (i.e., with norms > tol). @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" void magma_dcompact( magma_int_t m, magma_int_t n, double *dA, magma_int_t ldda, double *dnorms, double tol, magma_index_t *active, magma_index_t *cBlock) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < max(1,m)) info = -4; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || n == 0 ) return; dim3 threads( NB ); dim3 grid( (m + NB - 1)/NB ); hipLaunchKernelGGL(( dcompact_kernel), dim3(grid), dim3(threads), 0, magma_stream , m, n, dA, ldda, dnorms, tol, active, active+n ); magma_index_getvector( 1, active+n, 1, cBlock, 1 ); } /* ===================================================================== */ /** Purpose ------- ZCOMPACTACTIVE takes a set of n vectors of size m (in dA) and an array of 1s and 0sindicating which vectors to compact (for 1s) and which to disregard (for 0s). 
Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in,out] dA COMPLEX DOUBLE PRECISION array, dimension (LDDA,N) The m by n matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[in] active INTEGER array, dimension N A mask of 1s and 0s showing if a vector remains or has been removed @ingroup magmasparse_d ********************************************************************/ extern "C" void magma_dcompactActive( magma_int_t m, magma_int_t n, double *dA, magma_int_t ldda, magma_index_t *active) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < max(1,m)) info = -4; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || n == 0 ) return; dim3 threads( NB ); dim3 grid( (m + NB - 1)/NB ); hipLaunchKernelGGL(( dcompactactive_kernel), dim3(grid), dim3(threads), 0, magma_stream , m, n, dA, ldda, active); } /* ===================================================================== */
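// A hedged usage sketch for magma_dcompact based on the signature documented
// above; the wrapper name compact_sketch is an assumption for illustration,
// and m, n, dA, ldda, dnorms and tol are presumed set up elsewhere. Note that
// dcompact_kernel writes the surviving count into active[n], so the active
// array needs n+1 device entries.
magma_index_t compact_sketch( magma_int_t m, magma_int_t n,
                              double *dA, magma_int_t ldda,
                              double *dnorms, double tol,
                              magma_index_t *d_active /* device, length n+1 */ )
{
    magma_index_t cBlock = 0;
    magma_dcompact( m, n, dA, ldda, dnorms, tol, d_active, &cBlock );
    return cBlock; // columns 0 .. cBlock-1 of dA survived the norm test
}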
687c86f988ace5b515656639a3107244622a1768.cu
/* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @generated from zcompact.cu normal z -> d, Fri Jul 18 17:34:28 2014 @author Stan Tomov */ #include "common_magma.h" #include <assert.h> #define NB 64 /* ===================================================================== Matrix is m x n, and is divided into block rows, each NB x n. Each CUDA block has NB threads to handle one block row. Each thread handles one row, iterating across all columns. */ __global__ void dcompact_kernel( int m, int n, double *dA, int ldda, double *dnorms, double tol, magma_index_t *active, magma_index_t *cBlock) { // dA is processed across row i (by the current thread) int i = blockIdx.x*blockDim.x + threadIdx.x; int cBlockSize = 0; if ( i < m ) { dA += i; for(int j = 0; j<n; j++){ if (dnorms[j] > tol && active[j]){ dA[ldda*cBlockSize] = dA[ldda*j]; cBlockSize++; } else if (i==0) active[j] = 0; } } if (i==0) *cBlock = cBlockSize; } __global__ void dcompactactive_kernel( int m, int n, double *dA, int ldda, magma_index_t *active) { // dA is processed across row i (by the current thread) int i = blockIdx.x*blockDim.x + threadIdx.x; int cBlockSize = 0; if ( i < m ) { dA += i; for(int j = 0; j<n; j++){ if (active[j]){ dA[ldda*cBlockSize] = dA[ldda*j]; cBlockSize++; } } } } /* ===================================================================== */ /** Purpose ------- ZCOMPACT takes a set of n vectors of size m (in dA) and their norms and compacts them into the cBlock size<=n vectors that have norms > tol. The active mask array has 1 or 0, showing if a vector remained or not in the compacted resulting set of vectors. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in,out] dA COMPLEX DOUBLE PRECISION array, dimension (LDDA,N) The m by n matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[in] dnorms DOUBLE PRECISION array, dimension N The norms of the N vectors in dA @param[in] tol DOUBLE PRECISON The tolerance value used in the criteria to compact or not. @param[out] active INTEGER array, dimension N A mask of 1s and 0s showing if a vector remains or has been removed @param[out] cBlock magma_index_t* The number of vectors that remain in dA (i.e., with norms > tol). @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" void magma_dcompact( magma_int_t m, magma_int_t n, double *dA, magma_int_t ldda, double *dnorms, double tol, magma_index_t *active, magma_index_t *cBlock) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < max(1,m)) info = -4; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || n == 0 ) return; dim3 threads( NB ); dim3 grid( (m + NB - 1)/NB ); dcompact_kernel<<< grid, threads, 0, magma_stream >>>( m, n, dA, ldda, dnorms, tol, active, active+n ); magma_index_getvector( 1, active+n, 1, cBlock, 1 ); } /* ===================================================================== */ /** Purpose ------- ZCOMPACTACTIVE takes a set of n vectors of size m (in dA) and an array of 1s and 0sindicating which vectors to compact (for 1s) and which to disregard (for 0s). Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. 
@param[in,out] dA COMPLEX DOUBLE PRECISION array, dimension (LDDA,N) The m by n matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[in] active INTEGER array, dimension N A mask of 1s and 0s showing if a vector remains or has been removed @ingroup magmasparse_d ********************************************************************/ extern "C" void magma_dcompactActive( magma_int_t m, magma_int_t n, double *dA, magma_int_t ldda, magma_index_t *active) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < max(1,m)) info = -4; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || n == 0 ) return; dim3 threads( NB ); dim3 grid( (m + NB - 1)/NB ); dcompactactive_kernel<<< grid, threads, 0, magma_stream >>>( m, n, dA, ldda, active); } /* ===================================================================== */
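// A plain host reference (illustrative only) of the column packing that
// dcompact_kernel performs one row per thread: keep columns whose norm passes
// the tolerance and shift them left, clearing the mask for dropped columns.
int compact_reference(int m, int n, double* A, int lda,
                      const double* norms, double tol, int* active)
{
    int kept = 0;
    for (int j = 0; j < n; ++j) {
        if (norms[j] > tol && active[j]) {
            for (int i = 0; i < m; ++i)
                A[i + kept * lda] = A[i + j * lda]; // pack column j leftwards
            kept++;
        } else {
            active[j] = 0; // column removed from the active set
        }
    }
    return kept; // corresponds to *cBlock
}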
d73568d29afccd5c4a61267cb8b9e43207fec719.hip
// !!! This is a file automatically generated by hipify!!! /* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from sparse/blas/zmdot_shfl.cu, normal z -> s, Mon Jun 25 18:24:25 2018 @author Moritz Kreutzer */ #include "magmasparse_internal.h" #include "magmasparse_s.h" #define BLOCK_SIZE 512 #define PRECISION_s #include <hip/hip_runtime.h> // for TORCH_HIP_VERSION #if (TORCH_HIP_VERSION <= 6000) // CUDA 6.5 adds Double precision version; here's an implementation for CUDA 6.0 and earlier. // from https://devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/ __device__ inline real_Double_t __shfl_down(real_Double_t var, unsigned int srcLane, int width=32) { int2 a = *reinterpret_cast<int2*>(&var); a.x = __shfl_down(a.x, srcLane, width); a.y = __shfl_down(a.y, srcLane, width); return *reinterpret_cast<float*>(&a); } #endif template<typename T> __inline__ __device__ T warpReduceSum(T val) { #if __CUDA_ARCH__ >= 300 #if __CUDACC_VER_MAJOR__ < 9 val += __shfl_down(val, 16); val += __shfl_down(val, 8); val += __shfl_down(val, 4); val += __shfl_down(val, 2); val += __shfl_down(val, 1); #else val += __shfl_down_sync(0xffffffff,val, 16); val += __shfl_down_sync(0xffffffff,val, 8); val += __shfl_down_sync(0xffffffff,val, 4); val += __shfl_down_sync(0xffffffff,val, 2); val += __shfl_down_sync(0xffffffff,val, 1); #endif #endif return val; } #ifdef PRECISION_z template<> __inline__ __device__ float warpReduceSum<float>(float val) { #if __CUDA_ARCH__ >= 300 int4 a = *reinterpret_cast<int4*>(&val); #if __CUDACC_VER_MAJOR__ < 9 a.x += __shfl_down(a.x, 16); a.y += __shfl_down(a.y, 16); a.z += __shfl_down(a.z, 16); a.w += __shfl_down(a.w, 16); a.x += __shfl_down(a.x, 8); a.y += __shfl_down(a.y, 8); a.z += __shfl_down(a.z, 8); a.w += __shfl_down(a.w, 8); a.x += __shfl_down(a.x, 4); a.y += __shfl_down(a.y, 4); a.z += __shfl_down(a.z, 4); a.w += __shfl_down(a.w, 4); a.x += __shfl_down(a.x, 2); a.y += __shfl_down(a.y, 2); a.z += __shfl_down(a.z, 2); a.w += __shfl_down(a.w, 2); a.x += __shfl_down(a.x, 1); a.y += __shfl_down(a.y, 1); a.z += __shfl_down(a.z, 1); a.w += __shfl_down(a.w, 1); #else a.x += __shfl_down_sync(0xffffffff,a.x, 16); a.y += __shfl_down_sync(0xffffffff,a.y, 16); a.z += __shfl_down_sync(0xffffffff,a.z, 16); a.w += __shfl_down_sync(0xffffffff,a.w, 16); a.x += __shfl_down_sync(0xffffffff,a.x, 8); a.y += __shfl_down_sync(0xffffffff,a.y, 8); a.z += __shfl_down_sync(0xffffffff,a.z, 8); a.w += __shfl_down_sync(0xffffffff,a.w, 8); a.x += __shfl_down_sync(0xffffffff,a.x, 4); a.y += __shfl_down_sync(0xffffffff,a.y, 4); a.z += __shfl_down_sync(0xffffffff,a.z, 4); a.w += __shfl_down_sync(0xffffffff,a.w, 4); a.x += __shfl_down_sync(0xffffffff,a.x, 2); a.y += __shfl_down_sync(0xffffffff,a.y, 2); a.z += __shfl_down_sync(0xffffffff,a.z, 2); a.w += __shfl_down_sync(0xffffffff,a.w, 2); a.x += __shfl_down_sync(0xffffffff,a.x, 1); a.y += __shfl_down_sync(0xffffffff,a.y, 1); a.z += __shfl_down_sync(0xffffffff,a.z, 1); a.w += __shfl_down_sync(0xffffffff,a.w, 1); #endif #endif return val; } #endif // PRECISION_z #ifdef PRECISION_c template<> __inline__ __device__ magmaFloatComplex warpReduceSum<magmaFloatComplex>(magmaFloatComplex val) { #if __CUDA_ARCH__ >= 300 float2 a = *reinterpret_cast<float2*>(&val); #if __CUDACC_VER_MAJOR__ < 9 a.x += __shfl_down(a.x, 16); a.y += __shfl_down(a.y, 16); a.x += __shfl_down(a.x, 8); a.y += __shfl_down(a.y, 8); a.x += __shfl_down(a.x, 4); a.y += __shfl_down(a.y, 4); a.x += 
__shfl_down(a.x, 2); a.y += __shfl_down(a.y, 2); a.x += __shfl_down(a.x, 1); a.y += __shfl_down(a.y, 1); #else a.x += __shfl_down_sync(0xffffffff,a.x, 16); a.y += __shfl_down_sync(0xffffffff,a.y, 16); a.x += __shfl_down_sync(0xffffffff,a.x, 8); a.y += __shfl_down_sync(0xffffffff,a.y, 8); a.x += __shfl_down_sync(0xffffffff,a.x, 4); a.y += __shfl_down_sync(0xffffffff,a.y, 4); a.x += __shfl_down_sync(0xffffffff,a.x, 2); a.y += __shfl_down_sync(0xffffffff,a.y, 2); a.x += __shfl_down_sync(0xffffffff,a.x, 1); a.y += __shfl_down_sync(0xffffffff,a.y, 1); #endif #endif return val; } #endif // PRECISION_c template<typename T> __inline__ __device__ T blockReduceSum_1D(T val) { extern __shared__ T shared[]; // Shared mem for 32 partial sums int lane = threadIdx.x % warpSize; int wid = threadIdx.x / warpSize; val = warpReduceSum<T>(val); // Each warp performs partial reduction if (lane == 0) shared[wid]=val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : MAGMA_S_ZERO; if (wid == 0) val = warpReduceSum<T>(val); //Final reduce within first warp return val; } template<typename T> __inline__ __device__ T blockReduceSum(T val) { extern __shared__ T shared[]; // Shared mem for 32 partial sums int lane = threadIdx.x % warpSize; int wid = threadIdx.x / warpSize; val = warpReduceSum<T>(val); // Each warp performs partial reduction if (lane == 0) shared[threadIdx.y*32+wid]=val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / warpSize) ? shared[threadIdx.y*32+lane] : MAGMA_S_ZERO; if (wid == 0) val = warpReduceSum<T>(val); //Final reduce within first warp return val; } template<typename T> __global__ void deviceReduceKernel(const T * __restrict__ in, T * __restrict__ out, int N) { T sum = MAGMA_S_MAKE(0.0, 0.0); //reduce multiple elements per thread for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { sum += in[i]; } sum = blockReduceSum<T>(sum); if (threadIdx.x == 0) out[blockIdx.x]=sum; } // dot product for multiple vectors using shuffle intrinsics and less shared memory __global__ void magma_sblockdot_kernel_shuffle( int n, int k, const float * __restrict__ v, const float * __restrict__ r, float * __restrict__ vtmp) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = threadIdx.y; float tmp; if (i < n) { tmp = v[i+j*n] * r[i]; } else { tmp = MAGMA_S_ZERO; } tmp = blockReduceSum(tmp); if (threadIdx.x == 0 ){ vtmp[ blockIdx.x+j*gridDim.x ] = tmp; } } // dot product for multiple vectors using shuffle intrinsics and less shared memory __global__ void magma_sblockdot_kernel_shuffle_1dblock( int n, int k, const float * __restrict__ v, const float * __restrict__ r, float * __restrict__ vtmp) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j; for (j=0; j < k; j++) { float tmp; if (i < n) { tmp = v[i+j*n] * r[i]; } else { tmp = MAGMA_S_ZERO; } tmp = blockReduceSum_1D(tmp); if (threadIdx.x == 0 ){ vtmp[ blockIdx.x+j*gridDim.x ] = tmp; } } } /** Purpose ------- Computes the scalar product of a set of vectors v_i such that skp = ( <v_0,r>, <v_1,r>, .. ) Returns the vector skp. Arguments --------- @param[in] n int length of v_i and r @param[in] k int # vectors v_i @param[in] v magmaFloat_ptr v = (v_0 .. v_i.. 
v_k) @param[in] r magmaFloat_ptr r @param[in] d1 magmaFloat_ptr workspace @param[in] d2 magmaFloat_ptr workspace @param[out] skp magmaFloat_ptr vector[k] of scalar products (<v_i,r>...) @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_sblas ********************************************************************/ extern "C" magma_int_t magma_smdotc_shfl( magma_int_t n, magma_int_t k, magmaFloat_ptr v, magmaFloat_ptr r, magmaFloat_ptr d1, magmaFloat_ptr d2, magmaFloat_ptr skp, magma_queue_t queue ) { if ( magma_getdevice_arch() < 300 ) { return magma_smdotc( n, k, v, r, d1, d2, skp, queue ); } else if (1) { // 1D block kernel seems to be always faster dim3 block( BLOCK_SIZE ); dim3 grid( magma_ceildiv( n, block.x ) ); hipLaunchKernelGGL(( magma_sblockdot_kernel_shuffle_1dblock), dim3(grid), dim3(block), 32*sizeof(float), queue->cuda_stream() , n, k, v, r, d1 ); int j; for (j=0; j < k; j++) { hipLaunchKernelGGL(( deviceReduceKernel<float>) , dim3(1), dim3(1024), 32*sizeof(float), queue->cuda_stream(), d1+grid.x*j, skp+j, grid.x); } } else { dim3 block( magma_roundup( magma_ceildiv(BLOCK_SIZE, k), 32 ), k ); while (block.x*block.y > 1024) { block.x -= 32; } dim3 grid( magma_ceildiv( n, block.x ) ); hipLaunchKernelGGL(( magma_sblockdot_kernel_shuffle), dim3(grid), dim3(block), 32*k*sizeof(float), queue->cuda_stream() , n, k, v, r, d1 ); int j; for (j=0; j < k; j++) { hipLaunchKernelGGL(( deviceReduceKernel<float>) , dim3(1), dim3(1024), 32*sizeof(float), queue->cuda_stream(), d1+grid.x*j, skp+j, grid.x); } } return MAGMA_SUCCESS; } /** Purpose ------- This is an extension of the merged dot product above by chunking the set of vectors v_i such that the data always fits into cache. It is equivalent to a matrix vecor product Vr where V contains few rows and many columns. The computation is the same: skp = ( <v_0,r>, <v_1,r>, .. ) Returns the vector skp. Arguments --------- @param[in] n int length of v_i and r @param[in] k int # vectors v_i @param[in] v magmaFloat_ptr v = (v_0 .. v_i.. v_k) @param[in] r magmaFloat_ptr r @param[in] d1 magmaFloat_ptr workspace @param[in] d2 magmaFloat_ptr workspace @param[out] skp magmaFloat_ptr vector[k] of scalar products (<v_i,r>...) @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_s ********************************************************************/ extern "C" magma_int_t magma_sgemvmdot_shfl( magma_int_t n, magma_int_t k, magmaFloat_ptr v, magmaFloat_ptr r, magmaFloat_ptr d1, magmaFloat_ptr d2, magmaFloat_ptr skp, magma_queue_t queue ) { if (k == 1) { // call CUBLAS dotc, we will never be faster float res = magma_sdot( n, v, 1, r, 1, queue ); magma_ssetvector( 1, &res, 1, skp, 1, queue ); } else if ( magma_getdevice_arch() < 300 ) { return magma_sgemvmdot( n, k, v, r, d1, d2, skp, queue ); } else { magma_smdotc_shfl( n, k, v, r, d1, d2, skp, queue ); } return MAGMA_SUCCESS; }
d73568d29afccd5c4a61267cb8b9e43207fec719.cu
/* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from sparse/blas/zmdot_shfl.cu, normal z -> s, Mon Jun 25 18:24:25 2018 @author Moritz Kreutzer */ #include "magmasparse_internal.h" #include "magmasparse_s.h" #define BLOCK_SIZE 512 #define PRECISION_s #include <cuda.h> // for CUDA_VERSION #if (CUDA_VERSION <= 6000) // CUDA 6.5 adds Double precision version; here's an implementation for CUDA 6.0 and earlier. // from https://devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/ __device__ inline real_Double_t __shfl_down(real_Double_t var, unsigned int srcLane, int width=32) { int2 a = *reinterpret_cast<int2*>(&var); a.x = __shfl_down(a.x, srcLane, width); a.y = __shfl_down(a.y, srcLane, width); return *reinterpret_cast<real_Double_t*>(&a); /* cast back to the full 8-byte type; the z -> s script emitted float*, which reads only half of the shuffled value */ } #endif template<typename T> __inline__ __device__ T warpReduceSum(T val) { #if __CUDA_ARCH__ >= 300 #if __CUDACC_VER_MAJOR__ < 9 val += __shfl_down(val, 16); val += __shfl_down(val, 8); val += __shfl_down(val, 4); val += __shfl_down(val, 2); val += __shfl_down(val, 1); #else val += __shfl_down_sync(0xffffffff,val, 16); val += __shfl_down_sync(0xffffffff,val, 8); val += __shfl_down_sync(0xffffffff,val, 4); val += __shfl_down_sync(0xffffffff,val, 2); val += __shfl_down_sync(0xffffffff,val, 1); #endif #endif return val; } #ifdef PRECISION_z template<> __inline__ __device__ float warpReduceSum<float>(float val) { #if __CUDA_ARCH__ >= 300 int4 a = *reinterpret_cast<int4*>(&val); #if __CUDACC_VER_MAJOR__ < 9 a.x += __shfl_down(a.x, 16); a.y += __shfl_down(a.y, 16); a.z += __shfl_down(a.z, 16); a.w += __shfl_down(a.w, 16); a.x += __shfl_down(a.x, 8); a.y += __shfl_down(a.y, 8); a.z += __shfl_down(a.z, 8); a.w += __shfl_down(a.w, 8); a.x += __shfl_down(a.x, 4); a.y += __shfl_down(a.y, 4); a.z += __shfl_down(a.z, 4); a.w += __shfl_down(a.w, 4); a.x += __shfl_down(a.x, 2); a.y += __shfl_down(a.y, 2); a.z += __shfl_down(a.z, 2); a.w += __shfl_down(a.w, 2); a.x += __shfl_down(a.x, 1); a.y += __shfl_down(a.y, 1); a.z += __shfl_down(a.z, 1); a.w += __shfl_down(a.w, 1); #else a.x += __shfl_down_sync(0xffffffff,a.x, 16); a.y += __shfl_down_sync(0xffffffff,a.y, 16); a.z += __shfl_down_sync(0xffffffff,a.z, 16); a.w += __shfl_down_sync(0xffffffff,a.w, 16); a.x += __shfl_down_sync(0xffffffff,a.x, 8); a.y += __shfl_down_sync(0xffffffff,a.y, 8); a.z += __shfl_down_sync(0xffffffff,a.z, 8); a.w += __shfl_down_sync(0xffffffff,a.w, 8); a.x += __shfl_down_sync(0xffffffff,a.x, 4); a.y += __shfl_down_sync(0xffffffff,a.y, 4); a.z += __shfl_down_sync(0xffffffff,a.z, 4); a.w += __shfl_down_sync(0xffffffff,a.w, 4); a.x += __shfl_down_sync(0xffffffff,a.x, 2); a.y += __shfl_down_sync(0xffffffff,a.y, 2); a.z += __shfl_down_sync(0xffffffff,a.z, 2); a.w += __shfl_down_sync(0xffffffff,a.w, 2); a.x += __shfl_down_sync(0xffffffff,a.x, 1); a.y += __shfl_down_sync(0xffffffff,a.y, 1); a.z += __shfl_down_sync(0xffffffff,a.z, 1); a.w += __shfl_down_sync(0xffffffff,a.w, 1); #endif #endif return val; } #endif // PRECISION_z #ifdef PRECISION_c template<> __inline__ __device__ magmaFloatComplex warpReduceSum<magmaFloatComplex>(magmaFloatComplex val) { #if __CUDA_ARCH__ >= 300 float2 a = *reinterpret_cast<float2*>(&val); #if __CUDACC_VER_MAJOR__ < 9 a.x += __shfl_down(a.x, 16); a.y += __shfl_down(a.y, 16); a.x += __shfl_down(a.x, 8); a.y += __shfl_down(a.y, 8); a.x += __shfl_down(a.x, 4); a.y += __shfl_down(a.y, 4); a.x += __shfl_down(a.x, 2); a.y += __shfl_down(a.y, 2); a.x += __shfl_down(a.x, 1); a.y
+= __shfl_down(a.y, 1); #else a.x += __shfl_down_sync(0xffffffff,a.x, 16); a.y += __shfl_down_sync(0xffffffff,a.y, 16); a.x += __shfl_down_sync(0xffffffff,a.x, 8); a.y += __shfl_down_sync(0xffffffff,a.y, 8); a.x += __shfl_down_sync(0xffffffff,a.x, 4); a.y += __shfl_down_sync(0xffffffff,a.y, 4); a.x += __shfl_down_sync(0xffffffff,a.x, 2); a.y += __shfl_down_sync(0xffffffff,a.y, 2); a.x += __shfl_down_sync(0xffffffff,a.x, 1); a.y += __shfl_down_sync(0xffffffff,a.y, 1); #endif #endif return val; } #endif // PRECISION_c template<typename T> __inline__ __device__ T blockReduceSum_1D(T val) { extern __shared__ T shared[]; // Shared mem for 32 partial sums int lane = threadIdx.x % warpSize; int wid = threadIdx.x / warpSize; val = warpReduceSum<T>(val); // Each warp performs partial reduction if (lane == 0) shared[wid]=val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : MAGMA_S_ZERO; if (wid == 0) val = warpReduceSum<T>(val); //Final reduce within first warp return val; } template<typename T> __inline__ __device__ T blockReduceSum(T val) { extern __shared__ T shared[]; // Shared mem for 32 partial sums int lane = threadIdx.x % warpSize; int wid = threadIdx.x / warpSize; val = warpReduceSum<T>(val); // Each warp performs partial reduction if (lane == 0) shared[threadIdx.y*32+wid]=val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / warpSize) ? shared[threadIdx.y*32+lane] : MAGMA_S_ZERO; if (wid == 0) val = warpReduceSum<T>(val); //Final reduce within first warp return val; } template<typename T> __global__ void deviceReduceKernel(const T * __restrict__ in, T * __restrict__ out, int N) { T sum = MAGMA_S_MAKE(0.0, 0.0); //reduce multiple elements per thread for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { sum += in[i]; } sum = blockReduceSum<T>(sum); if (threadIdx.x == 0) out[blockIdx.x]=sum; } // dot product for multiple vectors using shuffle intrinsics and less shared memory __global__ void magma_sblockdot_kernel_shuffle( int n, int k, const float * __restrict__ v, const float * __restrict__ r, float * __restrict__ vtmp) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = threadIdx.y; float tmp; if (i < n) { tmp = v[i+j*n] * r[i]; } else { tmp = MAGMA_S_ZERO; } tmp = blockReduceSum(tmp); if (threadIdx.x == 0 ){ vtmp[ blockIdx.x+j*gridDim.x ] = tmp; } } // dot product for multiple vectors using shuffle intrinsics and less shared memory __global__ void magma_sblockdot_kernel_shuffle_1dblock( int n, int k, const float * __restrict__ v, const float * __restrict__ r, float * __restrict__ vtmp) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j; for (j=0; j < k; j++) { float tmp; if (i < n) { tmp = v[i+j*n] * r[i]; } else { tmp = MAGMA_S_ZERO; } tmp = blockReduceSum_1D(tmp); if (threadIdx.x == 0 ){ vtmp[ blockIdx.x+j*gridDim.x ] = tmp; } } } /** Purpose ------- Computes the scalar product of a set of vectors v_i such that skp = ( <v_0,r>, <v_1,r>, .. ) Returns the vector skp. Arguments --------- @param[in] n int length of v_i and r @param[in] k int # vectors v_i @param[in] v magmaFloat_ptr v = (v_0 .. v_i.. 
v_k) @param[in] r magmaFloat_ptr r @param[in] d1 magmaFloat_ptr workspace @param[in] d2 magmaFloat_ptr workspace @param[out] skp magmaFloat_ptr vector[k] of scalar products (<v_i,r>...) @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_sblas ********************************************************************/ extern "C" magma_int_t magma_smdotc_shfl( magma_int_t n, magma_int_t k, magmaFloat_ptr v, magmaFloat_ptr r, magmaFloat_ptr d1, magmaFloat_ptr d2, magmaFloat_ptr skp, magma_queue_t queue ) { if ( magma_getdevice_arch() < 300 ) { return magma_smdotc( n, k, v, r, d1, d2, skp, queue ); } else if (1) { // 1D block kernel seems to be always faster dim3 block( BLOCK_SIZE ); dim3 grid( magma_ceildiv( n, block.x ) ); magma_sblockdot_kernel_shuffle_1dblock<<< grid, block, 32*sizeof(float), queue->cuda_stream() >>>( n, k, v, r, d1 ); int j; for (j=0; j < k; j++) { deviceReduceKernel<float> <<<1, 1024, 32*sizeof(float), queue->cuda_stream()>>>(d1+grid.x*j, skp+j, grid.x); } } else { dim3 block( magma_roundup( magma_ceildiv(BLOCK_SIZE, k), 32 ), k ); while (block.x*block.y > 1024) { block.x -= 32; } dim3 grid( magma_ceildiv( n, block.x ) ); magma_sblockdot_kernel_shuffle<<< grid, block, 32*k*sizeof(float), queue->cuda_stream() >>>( n, k, v, r, d1 ); int j; for (j=0; j < k; j++) { deviceReduceKernel<float> <<<1, 1024, 32*sizeof(float), queue->cuda_stream()>>>(d1+grid.x*j, skp+j, grid.x); } } return MAGMA_SUCCESS; } /** Purpose ------- This is an extension of the merged dot product above by chunking the set of vectors v_i such that the data always fits into cache. It is equivalent to a matrix vecor product Vr where V contains few rows and many columns. The computation is the same: skp = ( <v_0,r>, <v_1,r>, .. ) Returns the vector skp. Arguments --------- @param[in] n int length of v_i and r @param[in] k int # vectors v_i @param[in] v magmaFloat_ptr v = (v_0 .. v_i.. v_k) @param[in] r magmaFloat_ptr r @param[in] d1 magmaFloat_ptr workspace @param[in] d2 magmaFloat_ptr workspace @param[out] skp magmaFloat_ptr vector[k] of scalar products (<v_i,r>...) @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_s ********************************************************************/ extern "C" magma_int_t magma_sgemvmdot_shfl( magma_int_t n, magma_int_t k, magmaFloat_ptr v, magmaFloat_ptr r, magmaFloat_ptr d1, magmaFloat_ptr d2, magmaFloat_ptr skp, magma_queue_t queue ) { if (k == 1) { // call CUBLAS dotc, we will never be faster float res = magma_sdot( n, v, 1, r, 1, queue ); magma_ssetvector( 1, &res, 1, skp, 1, queue ); } else if ( magma_getdevice_arch() < 300 ) { return magma_sgemvmdot( n, k, v, r, d1, d2, skp, queue ); } else { magma_smdotc_shfl( n, k, v, r, d1, d2, skp, queue ); } return MAGMA_SUCCESS; }
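Both copies of this file hinge on the same two-pass pattern: warpReduceSum folds the 32 lanes of a warp with shuffles, blockReduceSum stages one partial per warp in shared memory and lets the first warp finish, and deviceReduceKernel sums the per-block partials. Below is a minimal, self-contained rendering of that pattern for a plain dot product; the names (warp_sum, dot_partial) and the host-side second pass are illustrative choices, not MAGMA's, and a CUDA >= 9 toolchain is assumed, which is why only the *_sync shuffle variants appear.

#include <cstdio>
#include <cuda_runtime.h>

__inline__ __device__ float warp_sum(float v)
{
    // Fold 32 lanes in five shuffle steps; lane 0 ends up with the warp total.
    for (int offset = 16; offset > 0; offset >>= 1)
        v += __shfl_down_sync(0xffffffff, v, offset);
    return v;
}

// blockDim.x is assumed to be a multiple of 32, as in the kernels above.
__global__ void dot_partial(const float *x, const float *y, float *partial, int n)
{
    __shared__ float warp_totals[32];   // one slot per warp, like blockReduceSum
    float v = 0.f;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x)
        v += x[i] * y[i];               // grid-stride partial products
    v = warp_sum(v);
    if (threadIdx.x % 32 == 0) warp_totals[threadIdx.x / 32] = v;
    __syncthreads();
    // The first warp reduces the per-warp totals (zeros for absent warps).
    v = (threadIdx.x < blockDim.x / 32) ? warp_totals[threadIdx.x] : 0.f;
    if (threadIdx.x / 32 == 0) v = warp_sum(v);
    if (threadIdx.x == 0) partial[blockIdx.x] = v;
}

int main()
{
    const int n = 1 << 20, threads = 256, blocks = 64;
    float *x, *y, *partial;
    cudaMallocManaged(&x, n * sizeof(float));
    cudaMallocManaged(&y, n * sizeof(float));
    cudaMallocManaged(&partial, blocks * sizeof(float));
    for (int i = 0; i < n; i++) { x[i] = 1.f; y[i] = 2.f; }
    dot_partial<<<blocks, threads>>>(x, y, partial, n);
    cudaDeviceSynchronize();
    float dot = 0.f;                    // the tiny second pass, done on the host
    for (int b = 0; b < blocks; b++) dot += partial[b];
    printf("dot = %.1f (expected %.1f)\n", dot, 2.f * n);
    cudaFree(x); cudaFree(y); cudaFree(partial);
    return 0;
}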
a7038850fa6af77540ca92d85f573569515e0924.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/device/cuda_util.h" #include "oneflow/core/framework/framework.h" #include "oneflow/core/kernel/kernel_util.cuh" namespace oneflow { namespace { template<typename T> __global__ void FakeQuantizationSymmetric(const T* in_ptr, const T* scale_ptr, const int64_t scale_size, const int64_t elements, const int64_t panel_size, const double quantization_bit, T* out_ptr) { int64_t gid = (blockDim.x * blockIdx.x) + threadIdx.x; int64_t step = gridDim.x * blockDim.x; T upper_bound = static_cast<T>(pow(2.0, quantization_bit - 1)) - 1; T lower_bound = -upper_bound; while (gid < elements) { int64_t channel_index = gid / panel_size; int64_t scale_idx = min(scale_size - 1, channel_index); T scale = scale_ptr[scale_idx]; T out = round(in_ptr[gid] / scale); out = out > upper_bound ? upper_bound : out; out = out < lower_bound ? lower_bound : out; out_ptr[gid] = out * scale; gid += step; } } template<typename T> __global__ void FakeQuantizationAffine(const T* in_ptr, const T* scale_ptr, const T* zero_point_ptr, const int64_t scale_size, const int64_t elements, const int64_t panel_size, const double quantization_bit, T* out_ptr) { int64_t gid = (blockDim.x * blockIdx.x) + threadIdx.x; int64_t step = gridDim.x * blockDim.x; T upper_bound = static_cast<T>(pow(2.0, quantization_bit)) - 1; T lower_bound = 0; while (gid < elements) { int64_t channel_index = gid / panel_size; int64_t scale_idx = min(scale_size - 1, channel_index); T scale = scale_ptr[scale_idx]; T zero_point = zero_point_ptr[scale_idx]; T out = round(in_ptr[gid] / scale + zero_point); out = out > upper_bound ? upper_bound : out; out = out < lower_bound ? 
lower_bound : out; out_ptr[gid] = (out - zero_point) * scale; gid += step; } } } // namespace template<typename T> class GpuFakeQuantizationKernel final : public user_op::OpKernel { public: GpuFakeQuantizationKernel() = default; ~GpuFakeQuantizationKernel() = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0); const user_op::Tensor* scale = ctx->Tensor4ArgNameAndIndex("scale", 0); const user_op::Tensor* zero_point = ctx->Tensor4ArgNameAndIndex("zero_point", 0); user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0); const std::string quantization_scheme = ctx->Attr<std::string>("quantization_scheme"); const int32_t quantization_bit = ctx->Attr<int32_t>("quantization_bit"); const std::string quantization_formula = ctx->Attr<std::string>("quantization_formula"); const int64_t elements = in->shape().elem_cnt(); const int64_t panel_size = in->shape().Count(1); const int64_t scale_size = scale->shape().elem_cnt(); if (quantization_formula == "google") { if (quantization_scheme == "symmetric") { RUN_CUDA_KERNEL((FakeQuantizationSymmetric<T>), ctx->device_ctx(), elements, in->dptr<T>(), scale->dptr<T>(), scale_size, elements, panel_size, quantization_bit, out->mut_dptr<T>()); } else { // quantization_scheme == "affine" RUN_CUDA_KERNEL((FakeQuantizationAffine<T>), ctx->device_ctx(), elements, in->dptr<T>(), scale->dptr<T>(), zero_point->dptr<T>(), scale_size, elements, panel_size, quantization_bit, out->mut_dptr<T>()); } } else { UNIMPLEMENTED(); } } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_FAKE_QUANTIZATION_KERNEL(dtype) \ REGISTER_USER_KERNEL("fake_quantization") \ .SetCreateFn<GpuFakeQuantizationKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == DeviceType::kGPU) \ & (user_op::HobDataType("in", 0) == GetDataType<dtype>::value)) REGISTER_FAKE_QUANTIZATION_KERNEL(float); REGISTER_FAKE_QUANTIZATION_KERNEL(double); } // namespace oneflow
a7038850fa6af77540ca92d85f573569515e0924.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/device/cuda_util.h" #include "oneflow/core/framework/framework.h" #include "oneflow/core/kernel/kernel_util.cuh" namespace oneflow { namespace { template<typename T> __global__ void FakeQuantizationSymmetric(const T* in_ptr, const T* scale_ptr, const int64_t scale_size, const int64_t elements, const int64_t panel_size, const double quantization_bit, T* out_ptr) { int64_t gid = (blockDim.x * blockIdx.x) + threadIdx.x; int64_t step = gridDim.x * blockDim.x; T upper_bound = static_cast<T>(pow(2.0, quantization_bit - 1)) - 1; T lower_bound = -upper_bound; while (gid < elements) { int64_t channel_index = gid / panel_size; int64_t scale_idx = min(scale_size - 1, channel_index); T scale = scale_ptr[scale_idx]; T out = round(in_ptr[gid] / scale); out = out > upper_bound ? upper_bound : out; out = out < lower_bound ? lower_bound : out; out_ptr[gid] = out * scale; gid += step; } } template<typename T> __global__ void FakeQuantizationAffine(const T* in_ptr, const T* scale_ptr, const T* zero_point_ptr, const int64_t scale_size, const int64_t elements, const int64_t panel_size, const double quantization_bit, T* out_ptr) { int64_t gid = (blockDim.x * blockIdx.x) + threadIdx.x; int64_t step = gridDim.x * blockDim.x; T upper_bound = static_cast<T>(pow(2.0, quantization_bit)) - 1; T lower_bound = 0; while (gid < elements) { int64_t channel_index = gid / panel_size; int64_t scale_idx = min(scale_size - 1, channel_index); T scale = scale_ptr[scale_idx]; T zero_point = zero_point_ptr[scale_idx]; T out = round(in_ptr[gid] / scale + zero_point); out = out > upper_bound ? upper_bound : out; out = out < lower_bound ? 
lower_bound : out; out_ptr[gid] = (out - zero_point) * scale; gid += step; } } } // namespace template<typename T> class GpuFakeQuantizationKernel final : public user_op::OpKernel { public: GpuFakeQuantizationKernel() = default; ~GpuFakeQuantizationKernel() = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0); const user_op::Tensor* scale = ctx->Tensor4ArgNameAndIndex("scale", 0); const user_op::Tensor* zero_point = ctx->Tensor4ArgNameAndIndex("zero_point", 0); user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0); const std::string quantization_scheme = ctx->Attr<std::string>("quantization_scheme"); const int32_t quantization_bit = ctx->Attr<int32_t>("quantization_bit"); const std::string quantization_formula = ctx->Attr<std::string>("quantization_formula"); const int64_t elements = in->shape().elem_cnt(); const int64_t panel_size = in->shape().Count(1); const int64_t scale_size = scale->shape().elem_cnt(); if (quantization_formula == "google") { if (quantization_scheme == "symmetric") { RUN_CUDA_KERNEL((FakeQuantizationSymmetric<T>), ctx->device_ctx(), elements, in->dptr<T>(), scale->dptr<T>(), scale_size, elements, panel_size, quantization_bit, out->mut_dptr<T>()); } else { // quantization_scheme == "affine" RUN_CUDA_KERNEL((FakeQuantizationAffine<T>), ctx->device_ctx(), elements, in->dptr<T>(), scale->dptr<T>(), zero_point->dptr<T>(), scale_size, elements, panel_size, quantization_bit, out->mut_dptr<T>()); } } else { UNIMPLEMENTED(); } } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_FAKE_QUANTIZATION_KERNEL(dtype) \ REGISTER_USER_KERNEL("fake_quantization") \ .SetCreateFn<GpuFakeQuantizationKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == DeviceType::kGPU) \ & (user_op::HobDataType("in", 0) == GetDataType<dtype>::value)) REGISTER_FAKE_QUANTIZATION_KERNEL(float); REGISTER_FAKE_QUANTIZATION_KERNEL(double); } // namespace oneflow
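Both kernels in this pair reduce to the same scalar recipe, applied elementwise with a per-channel scale picked by min(scale_size - 1, gid / panel_size): scale (and, for the affine scheme, shift by the zero point), round, clamp to the range the bit width can represent, then map back to float. A host-side restatement like the sketch below (our names, not part of OneFlow) is handy as a unit-test oracle for single values.

#include <algorithm>
#include <cmath>

// Symmetric: clamp round(x / scale) to [-(2^(b-1) - 1), 2^(b-1) - 1], rescale.
float fake_quant_symmetric(float x, float scale, int bit)
{
    float upper = std::pow(2.f, bit - 1) - 1.f;
    float lower = -upper;
    float q = std::min(std::max(std::round(x / scale), lower), upper);
    return q * scale;                       // dequantize back to float
}

// Affine: shift by the zero point and clamp to [0, 2^b - 1] instead.
float fake_quant_affine(float x, float scale, float zero_point, int bit)
{
    float upper = std::pow(2.f, bit) - 1.f;
    float lower = 0.f;
    float q = std::min(std::max(std::round(x / scale + zero_point), lower), upper);
    return (q - zero_point) * scale;
}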
715429c42386dd206b8d50e88836e09870951093.hip
// !!! This is a file automatically generated by hipify!!!
/* Compile with -lhipblas flag */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include <hipblas.h>   // the hipblas* API used below is declared here, not in rocblas.h

#define N 275 // Matrix size

/*
static void simple_sgemm(int n, float alpha, const float *A, const float *B, float beta, float *C)
{
    int i, j, k;
    for (i = 0; i < n; ++i) {
        for (j = 0; j < n; ++j) {
            float prod = 0;
            for (k = 0; k < n; ++k) {
                prod += A[k * n + i] * B[j * n + k];
            }
            C[j * n + i] = alpha * prod + beta * C[j * n + i];
        }
    }
}
*/

int main()
{
    // Declare variables
    float *h_A, *h_B, *h_C;
    float *d_A = 0, *d_B = 0, *d_C = 0;
    float alpha = 1.0f, beta = 0.0f;
    int n2 = N * N;
    int i;
    hipblasHandle_t handle;
    hipblasStatus_t status;

    // Initialise cuBLAS
    printf("cuBLAS test running...\n");
    status = hipblasCreate(&handle);
    if (status != HIPBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "cuBLAS initialisation error!\n");
        return EXIT_FAILURE;
    }

    // Allocate host memory
    h_A = (float*)malloc(n2 * sizeof(h_A[0]));
    h_B = (float*)malloc(n2 * sizeof(h_B[0]));
    h_C = (float*)malloc(n2 * sizeof(h_C[0]));

    // Fill matrices with test data
    for (i = 0; i < n2; i++) {
        h_A[i] = rand() / (float)RAND_MAX;
        h_B[i] = rand() / (float)RAND_MAX;
        h_C[i] = rand() / (float)RAND_MAX;
    }

    // Allocate device memory
    hipMalloc((void**)&d_A, n2 * sizeof(d_A[0]));
    hipMalloc((void**)&d_B, n2 * sizeof(d_B[0]));
    hipMalloc((void**)&d_C, n2 * sizeof(d_C[0]));

    // Initialise device matrices with host matrices
    status = hipblasSetVector(n2, sizeof(h_A[0]), h_A, 1, d_A, 1);
    if (status != HIPBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "Device access error (write A)\n");
        return EXIT_FAILURE;
    }
    status = hipblasSetVector(n2, sizeof(h_B[0]), h_B, 1, d_B, 1);
    if (status != HIPBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "Device access error (write B)\n");
        return EXIT_FAILURE;
    }
    status = hipblasSetVector(n2, sizeof(h_C[0]), h_C, 1, d_C, 1);
    if (status != HIPBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "Device access error (write C)\n");
        return EXIT_FAILURE;
    }

    // Perform sgemm
    status = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N, N, N,
                          &alpha, d_A, N, d_B, N, &beta, d_C, N);
    if (status != HIPBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "Kernel execution error\n");
        return EXIT_FAILURE;
    }

    // Read back result
    status = hipblasGetVector(n2, sizeof(h_C[0]), d_C, 1, h_C, 1);
    if (status != HIPBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "Device access error (read C)\n");
        return EXIT_FAILURE;
    }

    // Clean up
    free(h_A); free(h_B); free(h_C);
    hipFree(d_A); hipFree(d_B); hipFree(d_C);

    status = hipblasDestroy(handle);
    if (status != HIPBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "cuBLAS shutdown error!\n");
        return EXIT_FAILURE;
    }

    return 0;
}
715429c42386dd206b8d50e88836e09870951093.cu
/* Compile with -lcublas flag */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>

#define N 275 // Matrix size

/*
static void simple_sgemm(int n, float alpha, const float *A, const float *B, float beta, float *C)
{
    int i, j, k;
    for (i = 0; i < n; ++i) {
        for (j = 0; j < n; ++j) {
            float prod = 0;
            for (k = 0; k < n; ++k) {
                prod += A[k * n + i] * B[j * n + k];
            }
            C[j * n + i] = alpha * prod + beta * C[j * n + i];
        }
    }
}
*/

int main()
{
    // Declare variables
    float *h_A, *h_B, *h_C;
    float *d_A = 0, *d_B = 0, *d_C = 0;
    float alpha = 1.0f, beta = 0.0f;
    int n2 = N * N;
    int i;
    cublasHandle_t handle;
    cublasStatus_t status;

    // Initialise cuBLAS
    printf("cuBLAS test running...\n");
    status = cublasCreate(&handle);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "cuBLAS initialisation error!\n");
        return EXIT_FAILURE;
    }

    // Allocate host memory
    h_A = (float*)malloc(n2 * sizeof(h_A[0]));
    h_B = (float*)malloc(n2 * sizeof(h_B[0]));
    h_C = (float*)malloc(n2 * sizeof(h_C[0]));

    // Fill matrices with test data
    for (i = 0; i < n2; i++) {
        h_A[i] = rand() / (float)RAND_MAX;
        h_B[i] = rand() / (float)RAND_MAX;
        h_C[i] = rand() / (float)RAND_MAX;
    }

    // Allocate device memory
    cudaMalloc((void**)&d_A, n2 * sizeof(d_A[0]));
    cudaMalloc((void**)&d_B, n2 * sizeof(d_B[0]));
    cudaMalloc((void**)&d_C, n2 * sizeof(d_C[0]));

    // Initialise device matrices with host matrices
    status = cublasSetVector(n2, sizeof(h_A[0]), h_A, 1, d_A, 1);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "Device access error (write A)\n");
        return EXIT_FAILURE;
    }
    status = cublasSetVector(n2, sizeof(h_B[0]), h_B, 1, d_B, 1);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "Device access error (write B)\n");
        return EXIT_FAILURE;
    }
    status = cublasSetVector(n2, sizeof(h_C[0]), h_C, 1, d_C, 1);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "Device access error (write C)\n");
        return EXIT_FAILURE;
    }

    // Perform sgemm
    status = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N, N, N,
                         &alpha, d_A, N, d_B, N, &beta, d_C, N);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "Kernel execution error\n");
        return EXIT_FAILURE;
    }

    // Read back result
    status = cublasGetVector(n2, sizeof(h_C[0]), d_C, 1, h_C, 1);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "Device access error (read C)\n");
        return EXIT_FAILURE;
    }

    // Clean up
    free(h_A); free(h_B); free(h_C);
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);

    status = cublasDestroy(handle);
    if (status != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "cuBLAS shutdown error!\n");
        return EXIT_FAILURE;
    }

    return 0;
}
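One thing this sample (in either dialect) never checks is the allocation calls: a failed cudaMalloc leaves d_A null and the later cublasSetVector fails with a less helpful status. A small checking macro along the lines below is a common companion; it is our addition, not part of the sample, and the same shape works for the HIP copy with hipError_t and hipGetErrorString. Note also that cublasSgemm assumes column-major storage, which is why the commented-out CPU reference indexes A as A[k * n + i].

#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                              \
    do {                                                              \
        cudaError_t err_ = (call);                                    \
        if (err_ != cudaSuccess) {                                    \
            fprintf(stderr, "CUDA error %s at %s:%d\n",               \
                    cudaGetErrorString(err_), __FILE__, __LINE__);    \
            exit(EXIT_FAILURE);                                       \
        }                                                             \
    } while (0)

/* Usage: CUDA_CHECK(cudaMalloc((void**)&d_A, n2 * sizeof(d_A[0]))); */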
7af2b297cf472e2cbc53dcfb318b04be163e61d8.hip
// !!! This is a file automatically generated by hipify!!! /* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from sparse/blas/zgeisai_16.cu, normal z -> s, Sun Nov 20 20:20:42 2016 */ #include "magmasparse_internal.h" #define PRECISION_s #define REAL #define BLOCKSIZE 16 #define WARP_SIZE 16 #define WRP 16 #define WRQ 4 #include <hip/hip_runtime.h> // for TORCH_HIP_VERSION #if (TORCH_HIP_VERSION >= 7000) __device__ void strsv_lower_16kernel_general(float *dA, float *dB, int *sizes) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB[ 2 ]; float rA[ 2 ]; int n; int k; int N = sizes[j]; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. #pragma unroll for (n = 0; n < 2; n++) rB[n] = dB[n*WARP_SIZE+idn]; // Triangular solve in regs. #pragma unroll for (k = 0; k < N; k++) { #pragma unroll for (n = 0; n < 2; n++) rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB[k/WARP_SIZE] /= rA[k/WARP_SIZE]; float top = __shfl(rB[k/WARP_SIZE], k%WARP_SIZE); #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn > k) rB[n] -= (top*rA[n]); } // Drop B to dev mem. #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn < N) dB[n*WARP_SIZE+idn] = rB[n]; #endif } __device__ void strsv_upper_16kernel_general(float *dA, float *dB, int *sizes) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB[ 2 ]; float rA[ 2 ]; int n; int N = sizes[j]; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. #pragma unroll for (n = 0; n < 2; n++) rB[n] = dB[n*WARP_SIZE+idn]; // Triangular solve in regs. #pragma unroll for (int k = N-1; k > -1; k--) { #pragma unroll for (n = 0; n < 2; n++) rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB[k/WARP_SIZE] /= rA[k/WARP_SIZE]; float top = __shfl(rB[k/WARP_SIZE], k%WARP_SIZE); #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn < k) rB[n] -= (top*rA[n]); } // Drop B to dev mem. #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn < N) dB[n*WARP_SIZE+idn] = rB[n]; #endif } __device__ void strsv_lower_16kernel_1(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 1; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_16kernel_2(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 2; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void strsv_lower_16kernel_3(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 3; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_16kernel_4(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 4; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_16kernel_5(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 5; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_16kernel_6(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 6; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_16kernel_7(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 7; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_16kernel_8(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 8; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_16kernel_9(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. 
#pragma unroll for (int k = 0; k < 9; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_16kernel_10(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 10; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_16kernel_11(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 11; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_16kernel_12(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 12; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_16kernel_13(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 13; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_16kernel_14(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 14; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_16kernel_15(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 15; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void strsv_lower_16kernel_16(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 16; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = __shfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __global__ void strsv_lower_16kernel_switch(float *dA, float *dB, int *sizes, int num_rows ) { int j = blockIdx.y * gridDim.x + blockIdx.x; if (j < num_rows) { int N = sizes[j]; switch( N ) { case 1: strsv_lower_16kernel_1( dA, dB ); break; case 2: strsv_lower_16kernel_2( dA, dB ); break; case 3: strsv_lower_16kernel_3( dA, dB ); break; case 4: strsv_lower_16kernel_4( dA, dB ); break; case 5: strsv_lower_16kernel_5( dA, dB ); break; case 6: strsv_lower_16kernel_6( dA, dB ); break; case 7: strsv_lower_16kernel_7( dA, dB ); break; case 8: strsv_lower_16kernel_8( dA, dB ); break; case 9: strsv_lower_16kernel_9( dA, dB ); break; case 10: strsv_lower_16kernel_10( dA, dB ); break; case 11: strsv_lower_16kernel_11( dA, dB ); break; case 12: strsv_lower_16kernel_12( dA, dB ); break; case 13: strsv_lower_16kernel_13( dA, dB ); break; case 14: strsv_lower_16kernel_14( dA, dB ); break; case 15: strsv_lower_16kernel_15( dA, dB ); break; case 16: strsv_lower_16kernel_16( dA, dB ); break; default: strsv_lower_16kernel_general( dA, dB, sizes ); break; } } } __device__ void strsv_upper_16kernel_1(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 1-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_16kernel_2(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 2-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_16kernel_3(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 3-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_16kernel_4(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. 
rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 4-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_16kernel_5(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 5-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_16kernel_6(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 6-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_16kernel_7(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 7-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_16kernel_8(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 8-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_16kernel_9(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 9-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_16kernel_10(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 10-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void strsv_upper_16kernel_11(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 11-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_16kernel_12(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 12-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_16kernel_13(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 13-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_16kernel_14(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 14-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_16kernel_15(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 15-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_16kernel_16(float *dA, float *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 16-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = __shfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __global__ void strsv_upper_16kernel_switch(float *dA, float *dB, int *sizes, int num_rows ) { int j = blockIdx.y * gridDim.x + blockIdx.x; if (j < num_rows) { int N = sizes[j]; switch( N ) { case 1: strsv_upper_16kernel_1( dA, dB ); break; case 2: strsv_upper_16kernel_2( dA, dB ); break; case 3: strsv_upper_16kernel_3( dA, dB ); break; case 4: strsv_upper_16kernel_4( dA, dB ); break; case 5: strsv_upper_16kernel_5( dA, dB ); break; case 6: strsv_upper_16kernel_6( dA, dB ); break; case 7: strsv_upper_16kernel_7( dA, dB ); break; case 8: strsv_upper_16kernel_8( dA, dB ); break; case 9: strsv_upper_16kernel_9( dA, dB ); break; case 10: strsv_upper_16kernel_10( dA, dB ); break; case 11: strsv_upper_16kernel_11( dA, dB ); break; case 12: strsv_upper_16kernel_12( dA, dB ); break; case 13: strsv_upper_16kernel_13( dA, dB ); break; case 14: strsv_upper_16kernel_14( dA, dB ); break; case 15: strsv_upper_16kernel_15( dA, dB ); break; case 16: strsv_upper_16kernel_16( dA, dB ); break; default: strsv_upper_16kernel_general( dA, dB, sizes ); break; } } } // initialize arrays with zero __global__ void magma_sgpumemzero_16kernel( float * d, int n, int dim_x, int dim_y ) { int i = blockIdx.y * gridDim.x + blockIdx.x; int idx = threadIdx.x; if( i >= n ){ return; } if( idx >= dim_x ){ return; } for( int j=0; j<dim_y; j++) d[ i*dim_x*dim_y + j*dim_y + idx ] = MAGMA_S_MAKE( 0.0, 0.0 ); } __global__ void magma_slocations_lower_16kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, float *val, magma_index_t *sizes, magma_index_t *locations, float *trisystems, float *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; if( j >= n ){ return; } int start = row[j]; int end = row[j+1]; int count = end-start; if( i == 0 ){ sizes[j] = count; rhs[ j*WARP_SIZE ] = MAGMA_S_ONE; } if ( i<count ){ locations[ j*WARP_SIZE + i ] = col[ row[j]+i ]; } }// kernel __global__ void magma_slocations_trunc_lower_16kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, float *val, magma_index_t *sizes, magma_index_t *locations, float *trisystems, float *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; if( j >= n ){ return; } int start = row[j]; int end = row[j+1]; int count = end-start; // normal case if( count <= BLOCKSIZE ){ // normal case if( i == 0 ){ sizes[j] = count; rhs[ j*WARP_SIZE ] = MAGMA_S_ONE; } if ( i<count ){ locations[ j*WARP_SIZE + i ] = col[ row[j]+i ]; } } else { // truncate in this row to the blocksize, // take only the 16 elements close to the main diagonal into account count = BLOCKSIZE; if (i == 0) { sizes[j] = count; rhs[ j*WARP_SIZE ] = MAGMA_S_ONE; } locations[ j*WARP_SIZE + i ] = col[ row[j+1]-BLOCKSIZE+i ]; } }// kernel __global__ void magma_slocations_upper_16kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, float *val, magma_index_t *sizes, magma_index_t *locations, float *trisystems, float *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; if( j >= n ){ return; } int start = row[j]; int end = row[j+1]; int count = end-start; if( i == 0 ){ sizes[j] = count; rhs[ j*WARP_SIZE+count-1 ] = MAGMA_S_ONE; } if ( i<count ){ locations[ j*WARP_SIZE + i ] = col[ row[j]+i ]; } }// kernel __global__ void magma_slocations_trunc_upper_16kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, float *val, magma_index_t *sizes, magma_index_t *locations, float *trisystems, float *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; if( j >= n ){ return; } int start = 
row[j]; int end = row[j+1]; int count = end-start; // normal case if( count <= BLOCKSIZE ){ // normal case if( i == 0 ){ sizes[j] = count; rhs[ j*WARP_SIZE+count-1 ] = MAGMA_S_ONE; } if ( i<count ){ locations[ j*WARP_SIZE + i ] = col[ row[j]+i ]; } } else { // truncate in this row to the blocksize, // take only the 16 elements close to the main diagonal into account count = BLOCKSIZE; if (i == 0) { sizes[j] = count; rhs[ j*WARP_SIZE+count-1 ] = MAGMA_S_ONE; } locations[ j*WARP_SIZE + i ] = col[ row[j]+i ]; } }// kernel __global__ void magma_sfilltrisystems_16kernel( magma_int_t offset, magma_int_t limit, magma_index_t *row, magma_index_t *col, float *val, magma_index_t *sizes, magma_index_t *locations, float *trisystems, float *rhs ) { int i = (blockDim.x * blockIdx.x + threadIdx.x)+offset; int ii = (blockDim.x * blockIdx.x + threadIdx.x); if ( ii>=limit ){ return; } //if ( i<offset ){ // return; //} for( int j=0; j<sizes[ i ]; j++ ){// no need for first int k = row[ locations[ j+i*WARP_SIZE ] ]; int l = i*WARP_SIZE; int idx = 0; while( k < row[ locations[ j+i*WARP_SIZE ]+1 ] && l < (i+1)*WARP_SIZE ){ // stop once this column is done if( locations[ l ] == col[k] ){ //match // int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx; trisystems[ ii*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx ] = val[ k ]; k++; l++; idx++; } else if( col[k] < locations[ l ] ){// need to check next element k++; } else { // element does not exist, i.e. l < LC.col[k] // printf("increment l\n"); l++; // check next elment in the sparsity pattern idx++; // leave this element equal zero } } } }// kernel __global__ void magma_sbackinsert_16kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, float *val, magma_index_t *sizes, float *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; int end = sizes[j]; if( j >= n ){ return; } if ( i>=end ){ return; } val[row[j]+i] = rhs[j*WARP_SIZE+i]; }// kernel #endif /** Purpose ------- This routine is designet to combine all kernels into one. Arguments --------- @param[in] uplotype magma_uplo_t lower or upper triangular @param[in] transtype magma_trans_t possibility for transposed matrix @param[in] diagtype magma_diag_t unit diagonal or not @param[in] L magma_s_matrix triangular factor for which the ISAI matrix is computed. Col-Major CSR storage. @param[in,out] M magma_s_matrix* SPAI preconditioner CSR col-major @param[out] sizes magma_int_t* Number of Elements that are replaced. @param[out] locations magma_int_t* Array indicating the locations. @param[out] trisystems float* trisystems @param[out] rhs float* right-hand sides @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_saux ********************************************************************/ extern "C" magma_int_t magma_sisaigenerator_16_gpu( magma_uplo_t uplotype, magma_trans_t transtype, magma_diag_t diagtype, magma_s_matrix L, magma_s_matrix *M, magma_index_t *sizes, magma_index_t *locations, float *trisystems, float *rhs, magma_queue_t queue ) { magma_int_t info = 0; #if (TORCH_HIP_VERSION >= 7000) magma_int_t arch = magma_getdevice_arch(); hipDeviceSetCacheConfig( hipFuncCachePreferL1 ); // routine 1 int r1bs1 = WARP_SIZE; int r1bs2 = 1; int r1dg1 = min( int( sqrt( float( M->num_rows ))), 65535 ); int r1dg2 = min(magma_ceildiv( M->num_rows, r1dg1 ), 65535); int r1dg3 = magma_ceildiv( M->num_rows, r1dg1*r1dg2 ); dim3 r1block( r1bs1, r1bs2, 1 ); dim3 r1grid( r1dg1, r1dg2, r1dg3 ); int r2bs1 = WARP_SIZE; int r2bs2 = 1; int r2dg1 = magma_ceildiv( L.num_rows, r2bs1 ); int r2dg2 = 1; int r2dg3 = 1; dim3 r2block( r2bs1, r2bs2, 1 ); dim3 r2grid( r2dg1, r2dg2, r2dg3 ); int r3bs1 = WARP_SIZE; int r3bs2 = 1; int r3dg1 = magma_ceildiv( 32000, r2bs1 ); int r3dg2 = 1; int r3dg3 = 1; dim3 r3block( r3bs1, r3bs2, 1 ); dim3 r3grid( r3dg1, r3dg2, r3dg3 ); int recursive = magma_ceildiv( M->num_rows, 32000 ); if (arch >= 300) { hipLaunchKernelGGL(( magma_sgpumemzero_16kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() , rhs, L.num_rows, WARP_SIZE, 1); if (uplotype == MagmaLower) { hipLaunchKernelGGL(( magma_slocations_lower_16kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() , M->num_rows, M->drow, M->dcol, M->dval, sizes, locations, trisystems, rhs ); } else { hipLaunchKernelGGL(( magma_slocations_upper_16kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() , M->num_rows, M->drow, M->dcol, M->dval, sizes, locations, trisystems, rhs ); } // chunk it recursively into batches of 1600 for( int z=0; z<recursive; z++ ){ int limit = min(32000, L.num_rows-32000*z); hipLaunchKernelGGL(( magma_sgpumemzero_16kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() , trisystems, limit, WARP_SIZE, WARP_SIZE ); hipLaunchKernelGGL(( magma_sfilltrisystems_16kernel), dim3(r3grid), dim3(r3block), 0, queue->cuda_stream() , 32000*z, limit, L.drow, L.dcol, L.dval, sizes, locations, trisystems, rhs ); // routine 2 if (uplotype == MagmaLower) { hipLaunchKernelGGL(( strsv_lower_16kernel_switch), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() , trisystems, rhs+32000*16*z, sizes+32000*z, limit ); } else { hipLaunchKernelGGL(( strsv_upper_16kernel_switch), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() , trisystems, rhs+32000*16*z, sizes+32000*z, limit ); } } // routine 3 hipLaunchKernelGGL(( magma_sbackinsert_16kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() , M->num_rows, M->drow, M->dcol, M->dval, sizes, rhs ); } else { info = MAGMA_ERR_NOT_SUPPORTED; } #else // CUDA < 7000 printf( "%% error: ISAI preconditioner requires CUDA > 6.0.\n" ); info = MAGMA_ERR_NOT_SUPPORTED; #endif return info; }
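The sixteen strsv_lower_16kernel_N bodies above (and their upper twins) are one unrolled pattern: each of 16 lanes owns one row of a dense 16x16 triangular system, the pivot lane normalizes its entry, and a warp shuffle broadcasts that solved component so the remaining lanes can eliminate it from their rows. The kernel below is a stripped-down, single-system restatement of the lower solve; it is illustrative only, with our names, CUDA >= 9 __shfl_sync in place of the pre-Volta __shfl, and none of the batching, switch dispatch, or ISAI scaffolding.

#include <cuda_runtime.h>

#define TILE 16

// One 16-thread block solves one dense TILE x TILE lower-triangular system
// L x = b, with L stored column-major exactly like one slot of trisystems.
__global__ void trsv_lower_tile(const float *L, float *b)
{
    int lane = threadIdx.x;                 // one lane per row of the system
    float rB = b[lane];                     // this lane's right-hand-side entry
    for (int k = 0; k < TILE; k++) {
        float rA = L[k * TILE + lane];      // column k of L
        if (lane == k) rB /= rA;            // pivot lane finishes x[k]
        float top = __shfl_sync(0xffff, rB, k, TILE);   // broadcast x[k]
        if (lane > k) rB -= top * rA;       // eliminate below the pivot
    }
    b[lane] = rB;                           // b now holds the solution x
}

// launch: trsv_lower_tile<<<1, TILE>>>(d_L, d_b);

Keeping the system in registers and broadcasting through shuffles avoids both shared memory traffic and __syncthreads(), which is the point of the register-resident variants above.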
7af2b297cf472e2cbc53dcfb318b04be163e61d8.cu
/*
    -- MAGMA (version 2.2.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date November 2016

       @generated from sparse/blas/zgeisai_16.cu, normal z -> s, Sun Nov 20 20:20:42 2016
*/
#include "magmasparse_internal.h"

#define PRECISION_s
#define REAL
#define BLOCKSIZE 16
#define WARP_SIZE 16
#define WRP 16
#define WRQ 4

#include <cuda.h>  // for CUDA_VERSION

#if (CUDA_VERSION >= 7000)

__device__ void
strsv_lower_16kernel_general(float *dA, float *dB, int *sizes)
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB[ 2 ];
    float rA[ 2 ];
    int n;
    int k;
    int N = sizes[j];
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    #pragma unroll
    for (n = 0; n < 2; n++)
        rB[n] = dB[n*WARP_SIZE+idn];
    // Triangular solve in regs.
    #pragma unroll
    for (k = 0; k < N; k++) {
        #pragma unroll
        for (n = 0; n < 2; n++)
            rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn)
            rB[k/WARP_SIZE] /= rA[k/WARP_SIZE];
        float top = __shfl(rB[k/WARP_SIZE], k%WARP_SIZE);
        #pragma unroll
        for (n = 0; n < 2; n++)
            if (n*WARP_SIZE+idn > k)
                rB[n] -= (top*rA[n]);
    }
    // Drop B to dev mem.
    #pragma unroll
    for (n = 0; n < 2; n++)
        if (n*WARP_SIZE+idn < N)
            dB[n*WARP_SIZE+idn] = rB[n];
#endif
}

__device__ void
strsv_upper_16kernel_general(float *dA, float *dB, int *sizes)
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB[ 2 ];
    float rA[ 2 ];
    int n;
    int N = sizes[j];
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    #pragma unroll
    for (n = 0; n < 2; n++)
        rB[n] = dB[n*WARP_SIZE+idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = N-1; k > -1; k--) {
        #pragma unroll
        for (n = 0; n < 2; n++)
            rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn)
            rB[k/WARP_SIZE] /= rA[k/WARP_SIZE];
        float top = __shfl(rB[k/WARP_SIZE], k%WARP_SIZE);
        #pragma unroll
        for (n = 0; n < 2; n++)
            if (n*WARP_SIZE+idn < k)
                rB[n] -= (top*rA[n]);
    }
    // Drop B to dev mem.
    #pragma unroll
    for (n = 0; n < 2; n++)
        if (n*WARP_SIZE+idn < N)
            dB[n*WARP_SIZE+idn] = rB[n];
#endif
}

__device__ void
strsv_lower_16kernel_1(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 0; k < 1; k++) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float top = __shfl(rB, k%WARP_SIZE);
        if ( idn > k) rB -= (top*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__device__ void
strsv_lower_16kernel_2(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 0; k < 2; k++) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float top = __shfl(rB, k%WARP_SIZE);
        if ( idn > k) rB -= (top*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__device__ void
strsv_lower_16kernel_3(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 0; k < 3; k++) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float top = __shfl(rB, k%WARP_SIZE);
        if ( idn > k) rB -= (top*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__device__ void
strsv_lower_16kernel_4(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 0; k < 4; k++) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float top = __shfl(rB, k%WARP_SIZE);
        if ( idn > k) rB -= (top*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__device__ void
strsv_lower_16kernel_5(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 0; k < 5; k++) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float top = __shfl(rB, k%WARP_SIZE);
        if ( idn > k) rB -= (top*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__device__ void
strsv_lower_16kernel_6(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 0; k < 6; k++) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float top = __shfl(rB, k%WARP_SIZE);
        if ( idn > k) rB -= (top*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__device__ void
strsv_lower_16kernel_7(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 0; k < 7; k++) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float top = __shfl(rB, k%WARP_SIZE);
        if ( idn > k) rB -= (top*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__device__ void
strsv_lower_16kernel_8(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 0; k < 8; k++) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float top = __shfl(rB, k%WARP_SIZE);
        if ( idn > k) rB -= (top*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__device__ void
strsv_lower_16kernel_9(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 0; k < 9; k++) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float top = __shfl(rB, k%WARP_SIZE);
        if ( idn > k) rB -= (top*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__device__ void
strsv_lower_16kernel_10(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 0; k < 10; k++) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float top = __shfl(rB, k%WARP_SIZE);
        if ( idn > k) rB -= (top*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__device__ void
strsv_lower_16kernel_11(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 0; k < 11; k++) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float top = __shfl(rB, k%WARP_SIZE);
        if ( idn > k) rB -= (top*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__device__ void
strsv_lower_16kernel_12(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 0; k < 12; k++) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float top = __shfl(rB, k%WARP_SIZE);
        if ( idn > k) rB -= (top*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__device__ void
strsv_lower_16kernel_13(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 0; k < 13; k++) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float top = __shfl(rB, k%WARP_SIZE);
        if ( idn > k) rB -= (top*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__device__ void
strsv_lower_16kernel_14(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 0; k < 14; k++) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float top = __shfl(rB, k%WARP_SIZE);
        if ( idn > k) rB -= (top*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__device__ void
strsv_lower_16kernel_15(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 0; k < 15; k++) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float top = __shfl(rB, k%WARP_SIZE);
        if ( idn > k) rB -= (top*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__device__ void
strsv_lower_16kernel_16(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 0; k < 16; k++) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float top = __shfl(rB, k%WARP_SIZE);
        if ( idn > k) rB -= (top*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__global__ void
strsv_lower_16kernel_switch(float *dA, float *dB, int *sizes, int num_rows )
{
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    if (j < num_rows) {
        int N = sizes[j];
        switch( N ) {
            case  1: strsv_lower_16kernel_1( dA, dB ); break;
            case  2: strsv_lower_16kernel_2( dA, dB ); break;
            case  3: strsv_lower_16kernel_3( dA, dB ); break;
            case  4: strsv_lower_16kernel_4( dA, dB ); break;
            case  5: strsv_lower_16kernel_5( dA, dB ); break;
            case  6: strsv_lower_16kernel_6( dA, dB ); break;
            case  7: strsv_lower_16kernel_7( dA, dB ); break;
            case  8: strsv_lower_16kernel_8( dA, dB ); break;
            case  9: strsv_lower_16kernel_9( dA, dB ); break;
            case 10: strsv_lower_16kernel_10( dA, dB ); break;
            case 11: strsv_lower_16kernel_11( dA, dB ); break;
            case 12: strsv_lower_16kernel_12( dA, dB ); break;
            case 13: strsv_lower_16kernel_13( dA, dB ); break;
            case 14: strsv_lower_16kernel_14( dA, dB ); break;
            case 15: strsv_lower_16kernel_15( dA, dB ); break;
            case 16: strsv_lower_16kernel_16( dA, dB ); break;
            default: strsv_lower_16kernel_general( dA, dB, sizes ); break;
        }
    }
}

__device__ void
strsv_upper_16kernel_1(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 1-1; k >-1; k--) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float bottom = __shfl(rB, k%WARP_SIZE);
        if ( idn < k) rB -= (bottom*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__device__ void
strsv_upper_16kernel_2(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 2-1; k >-1; k--) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float bottom = __shfl(rB, k%WARP_SIZE);
        if ( idn < k) rB -= (bottom*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__device__ void
strsv_upper_16kernel_3(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 3-1; k >-1; k--) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float bottom = __shfl(rB, k%WARP_SIZE);
        if ( idn < k) rB -= (bottom*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__device__ void
strsv_upper_16kernel_4(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 4-1; k >-1; k--) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float bottom = __shfl(rB, k%WARP_SIZE);
        if ( idn < k) rB -= (bottom*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__device__ void
strsv_upper_16kernel_5(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 5-1; k >-1; k--) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float bottom = __shfl(rB, k%WARP_SIZE);
        if ( idn < k) rB -= (bottom*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__device__ void
strsv_upper_16kernel_6(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 6-1; k >-1; k--) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float bottom = __shfl(rB, k%WARP_SIZE);
        if ( idn < k) rB -= (bottom*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__device__ void
strsv_upper_16kernel_7(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 7-1; k >-1; k--) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float bottom = __shfl(rB, k%WARP_SIZE);
        if ( idn < k) rB -= (bottom*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__device__ void
strsv_upper_16kernel_8(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 8-1; k >-1; k--) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float bottom = __shfl(rB, k%WARP_SIZE);
        if ( idn < k) rB -= (bottom*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__device__ void
strsv_upper_16kernel_9(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 9-1; k >-1; k--) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float bottom = __shfl(rB, k%WARP_SIZE);
        if ( idn < k) rB -= (bottom*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__device__ void
strsv_upper_16kernel_10(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 10-1; k >-1; k--) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float bottom = __shfl(rB, k%WARP_SIZE);
        if ( idn < k) rB -= (bottom*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__device__ void
strsv_upper_16kernel_11(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 11-1; k >-1; k--) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float bottom = __shfl(rB, k%WARP_SIZE);
        if ( idn < k) rB -= (bottom*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__device__ void
strsv_upper_16kernel_12(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 12-1; k >-1; k--) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float bottom = __shfl(rB, k%WARP_SIZE);
        if ( idn < k) rB -= (bottom*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__device__ void
strsv_upper_16kernel_13(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 13-1; k >-1; k--) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float bottom = __shfl(rB, k%WARP_SIZE);
        if ( idn < k) rB -= (bottom*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__device__ void
strsv_upper_16kernel_14(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 14-1; k >-1; k--) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float bottom = __shfl(rB, k%WARP_SIZE);
        if ( idn < k) rB -= (bottom*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__device__ void
strsv_upper_16kernel_15(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 15-1; k >-1; k--) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float bottom = __shfl(rB, k%WARP_SIZE);
        if ( idn < k) rB -= (bottom*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__device__ void
strsv_upper_16kernel_16(float *dA, float *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    float rB;
    float rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;
    // Read B to regs.
    rB = dB[idn];
    // Triangular solve in regs.
    #pragma unroll
    for (int k = 16-1; k >-1; k--) {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn) rB /= rA;
        float bottom = __shfl(rB, k%WARP_SIZE);
        if ( idn < k) rB -= (bottom*rA);
    }
    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

__global__ void
strsv_upper_16kernel_switch(float *dA, float *dB, int *sizes, int num_rows )
{
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    if (j < num_rows) {
        int N = sizes[j];
        switch( N ) {
            case  1: strsv_upper_16kernel_1( dA, dB ); break;
            case  2: strsv_upper_16kernel_2( dA, dB ); break;
            case  3: strsv_upper_16kernel_3( dA, dB ); break;
            case  4: strsv_upper_16kernel_4( dA, dB ); break;
            case  5: strsv_upper_16kernel_5( dA, dB ); break;
            case  6: strsv_upper_16kernel_6( dA, dB ); break;
            case  7: strsv_upper_16kernel_7( dA, dB ); break;
            case  8: strsv_upper_16kernel_8( dA, dB ); break;
            case  9: strsv_upper_16kernel_9( dA, dB ); break;
            case 10: strsv_upper_16kernel_10( dA, dB ); break;
            case 11: strsv_upper_16kernel_11( dA, dB ); break;
            case 12: strsv_upper_16kernel_12( dA, dB ); break;
            case 13: strsv_upper_16kernel_13( dA, dB ); break;
            case 14: strsv_upper_16kernel_14( dA, dB ); break;
            case 15: strsv_upper_16kernel_15( dA, dB ); break;
            case 16: strsv_upper_16kernel_16( dA, dB ); break;
            default: strsv_upper_16kernel_general( dA, dB, sizes ); break;
        }
    }
}

// initialize arrays with zero
__global__ void
magma_sgpumemzero_16kernel(
    float * d,
    int n,
    int dim_x,
    int dim_y )
{
    int i = blockIdx.y * gridDim.x + blockIdx.x;
    int idx = threadIdx.x;

    if( i >= n ){
        return;
    }
    if( idx >= dim_x ){
        return;
    }

    for( int j=0; j<dim_y; j++)
        d[ i*dim_x*dim_y + j*dim_y + idx ] = MAGMA_S_MAKE( 0.0, 0.0 );
}

__global__ void
magma_slocations_lower_16kernel(
    magma_int_t n,
    magma_index_t *row,
    magma_index_t *col,
    float *val,
    magma_index_t *sizes,
    magma_index_t *locations,
    float *trisystems,
    float *rhs )
{
    int i = threadIdx.x;
    int j = blockIdx.y * gridDim.x + blockIdx.x;

    if( j >= n ){
        return;
    }
    int start = row[j];
    int end = row[j+1];
    int count = end-start;
    if( i == 0 ){
        sizes[j] = count;
        rhs[ j*WARP_SIZE ] = MAGMA_S_ONE;
    }

    if ( i<count ){
        locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
    }
}// kernel

__global__ void
magma_slocations_trunc_lower_16kernel(
    magma_int_t n,
    magma_index_t *row,
    magma_index_t *col,
    float *val,
    magma_index_t *sizes,
    magma_index_t *locations,
    float *trisystems,
    float *rhs )
{
    int i = threadIdx.x;
    int j = blockIdx.y * gridDim.x + blockIdx.x;

    if( j >= n ){
        return;
    }
    int start = row[j];
    int end = row[j+1];
    int count = end-start;

    // normal case
    if( count <= BLOCKSIZE ){ // normal case
        if( i == 0 ){
            sizes[j] = count;
            rhs[ j*WARP_SIZE ] = MAGMA_S_ONE;
        }
        if ( i<count ){
            locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
        }
    }
    else {
        // truncate in this row to the blocksize,
        // take only the 16 elements close to the main diagonal into account
        count = BLOCKSIZE;
        if (i == 0) {
            sizes[j] = count;
            rhs[ j*WARP_SIZE ] = MAGMA_S_ONE;
        }
        locations[ j*WARP_SIZE + i ] = col[ row[j+1]-BLOCKSIZE+i ];
    }
}// kernel

__global__ void
magma_slocations_upper_16kernel(
    magma_int_t n,
    magma_index_t *row,
    magma_index_t *col,
    float *val,
    magma_index_t *sizes,
    magma_index_t *locations,
    float *trisystems,
    float *rhs )
{
    int i = threadIdx.x;
    int j = blockIdx.y * gridDim.x + blockIdx.x;

    if( j >= n ){
        return;
    }
    int start = row[j];
    int end = row[j+1];
    int count = end-start;
    if( i == 0 ){
        sizes[j] = count;
        rhs[ j*WARP_SIZE+count-1 ] = MAGMA_S_ONE;
    }

    if ( i<count ){
        locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
    }
}// kernel

__global__ void
magma_slocations_trunc_upper_16kernel(
    magma_int_t n,
    magma_index_t *row,
    magma_index_t *col,
    float *val,
    magma_index_t *sizes,
    magma_index_t *locations,
    float *trisystems,
    float *rhs )
{
    int i = threadIdx.x;
    int j = blockIdx.y * gridDim.x + blockIdx.x;

    if( j >= n ){
        return;
    }
    int start = row[j];
    int end = row[j+1];
    int count = end-start;

    // normal case
    if( count <= BLOCKSIZE ){ // normal case
        if( i == 0 ){
            sizes[j] = count;
            rhs[ j*WARP_SIZE+count-1 ] = MAGMA_S_ONE;
        }
        if ( i<count ){
            locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
        }
    }
    else {
        // truncate in this row to the blocksize,
        // take only the 16 elements close to the main diagonal into account
        count = BLOCKSIZE;
        if (i == 0) {
            sizes[j] = count;
            rhs[ j*WARP_SIZE+count-1 ] = MAGMA_S_ONE;
        }
        locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
    }
}// kernel

__global__ void
magma_sfilltrisystems_16kernel(
    magma_int_t offset,
    magma_int_t limit,
    magma_index_t *row,
    magma_index_t *col,
    float *val,
    magma_index_t *sizes,
    magma_index_t *locations,
    float *trisystems,
    float *rhs )
{
    int i = (blockDim.x * blockIdx.x + threadIdx.x)+offset;
    int ii = (blockDim.x * blockIdx.x + threadIdx.x);

    if ( ii>=limit ){
        return;
    }
    //if ( i<offset ){
    //    return;
    //}

    for( int j=0; j<sizes[ i ]; j++ ){// no need for first
        int k = row[ locations[ j+i*WARP_SIZE ] ];
        int l = i*WARP_SIZE;
        int idx = 0;
        while( k < row[ locations[ j+i*WARP_SIZE ]+1 ] && l < (i+1)*WARP_SIZE ){ // stop once this column is done
            if( locations[ l ] == col[k] ){ //match
                // int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx;
                trisystems[ ii*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx ] = val[ k ];
                k++;
                l++;
                idx++;
            } else if( col[k] < locations[ l ] ){ // need to check next element
                k++;
            } else { // element does not exist, i.e. l < LC.col[k]
                // printf("increment l\n");
                l++;   // check next element in the sparsity pattern
                idx++; // leave this element equal zero
            }
        }
    }
}// kernel

__global__ void
magma_sbackinsert_16kernel(
    magma_int_t n,
    magma_index_t *row,
    magma_index_t *col,
    float *val,
    magma_index_t *sizes,
    float *rhs )
{
    int i = threadIdx.x;
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int end = sizes[j];
    if( j >= n ){
        return;
    }
    if ( i>=end ){
        return;
    }
    val[row[j]+i] = rhs[j*WARP_SIZE+i];
}// kernel

#endif

/**
    Purpose
    -------
    This routine is designed to combine all kernels into one.

    Arguments
    ---------

    @param[in]
    uplotype    magma_uplo_t
                lower or upper triangular

    @param[in]
    transtype   magma_trans_t
                possibility for transposed matrix

    @param[in]
    diagtype    magma_diag_t
                unit diagonal or not

    @param[in]
    L           magma_s_matrix
                triangular factor for which the ISAI matrix is computed.
                Col-Major CSR storage.

    @param[in,out]
    M           magma_s_matrix*
                SPAI preconditioner CSR col-major

    @param[out]
    sizes       magma_int_t*
                Number of Elements that are replaced.

    @param[out]
    locations   magma_int_t*
                Array indicating the locations.

    @param[out]
    trisystems  float*
                trisystems

    @param[out]
    rhs         float*
                right-hand sides

    @param[in]
    queue       magma_queue_t
                Queue to execute in.

    @ingroup magmasparse_saux
    ********************************************************************/
extern "C" magma_int_t
magma_sisaigenerator_16_gpu(
    magma_uplo_t uplotype,
    magma_trans_t transtype,
    magma_diag_t diagtype,
    magma_s_matrix L,
    magma_s_matrix *M,
    magma_index_t *sizes,
    magma_index_t *locations,
    float *trisystems,
    float *rhs,
    magma_queue_t queue )
{
    magma_int_t info = 0;

#if (CUDA_VERSION >= 7000)
    magma_int_t arch = magma_getdevice_arch();

    cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 );

    // routine 1
    int r1bs1 = WARP_SIZE;
    int r1bs2 = 1;
    int r1dg1 = min( int( sqrt( float( M->num_rows ))), 65535 );
    int r1dg2 = min(magma_ceildiv( M->num_rows, r1dg1 ), 65535);
    int r1dg3 = magma_ceildiv( M->num_rows, r1dg1*r1dg2 );
    dim3 r1block( r1bs1, r1bs2, 1 );
    dim3 r1grid( r1dg1, r1dg2, r1dg3 );

    int r2bs1 = WARP_SIZE;
    int r2bs2 = 1;
    int r2dg1 = magma_ceildiv( L.num_rows, r2bs1 );
    int r2dg2 = 1;
    int r2dg3 = 1;
    dim3 r2block( r2bs1, r2bs2, 1 );
    dim3 r2grid( r2dg1, r2dg2, r2dg3 );

    int r3bs1 = WARP_SIZE;
    int r3bs2 = 1;
    int r3dg1 = magma_ceildiv( 32000, r2bs1 );
    int r3dg2 = 1;
    int r3dg3 = 1;
    dim3 r3block( r3bs1, r3bs2, 1 );
    dim3 r3grid( r3dg1, r3dg2, r3dg3 );

    int recursive = magma_ceildiv( M->num_rows, 32000 );

    if (arch >= 300) {
        magma_sgpumemzero_16kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
            rhs, L.num_rows, WARP_SIZE, 1);

        if (uplotype == MagmaLower) {
            magma_slocations_lower_16kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
                M->num_rows,
                M->drow,
                M->dcol,
                M->dval,
                sizes,
                locations,
                trisystems,
                rhs );
        }
        else {
            magma_slocations_upper_16kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
                M->num_rows,
                M->drow,
                M->dcol,
                M->dval,
                sizes,
                locations,
                trisystems,
                rhs );
        }

        // chunk it recursively into batches of 32000
        for( int z=0; z<recursive; z++ ){
            int limit = min(32000, L.num_rows-32000*z);

            magma_sgpumemzero_16kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
                trisystems, limit, WARP_SIZE, WARP_SIZE );

            magma_sfilltrisystems_16kernel<<< r3grid, r3block, 0, queue->cuda_stream() >>>(
                32000*z,
                limit,
                L.drow,
                L.dcol,
                L.dval,
                sizes,
                locations,
                trisystems,
                rhs );

            // routine 2
            if (uplotype == MagmaLower) {
                strsv_lower_16kernel_switch<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
                    trisystems,
                    rhs+32000*16*z,
                    sizes+32000*z,
                    limit );
            }
            else {
                strsv_upper_16kernel_switch<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
                    trisystems,
                    rhs+32000*16*z,
                    sizes+32000*z,
                    limit );
            }
        }

        // routine 3
        magma_sbackinsert_16kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
            M->num_rows,
            M->drow,
            M->dcol,
            M->dval,
            sizes,
            rhs );
    }
    else {
        info = MAGMA_ERR_NOT_SUPPORTED;
    }
#else
    // CUDA < 7000
    printf( "%% error: ISAI preconditioner requires CUDA > 6.0.\n" );
    info = MAGMA_ERR_NOT_SUPPORTED;
#endif

    return info;
}
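// ---------------------------------------------------------------------------
// A minimal standalone sketch of the pattern the sixteen
// strsv_lower_16kernel_* bodies above all instantiate: each lane owns one
// row of a dense 16x16 column-major triangular system, the lane on the
// diagonal divides, the solved entry is broadcast across the warp, and the
// remaining lanes apply a rank-1 update. Written with the modern
// __shfl_sync spelling of the pre-CUDA-9 __shfl used above; the kernel
// name and the assumption N <= 16 are illustrative, not MAGMA API.
__global__ void trsv_lower_warp_sketch(const float *L, const float *b,
                                       float *x, int N)
{
    int lane = threadIdx.x;                  // one matrix row per lane
    float rB = (lane < N) ? b[lane] : 0.0f;
    for (int k = 0; k < N; k++) {
        float rA = L[k * 16 + lane];         // column k, one entry per lane
        if (lane == k)
            rB /= rA;                        // solve the diagonal entry
        float top = __shfl_sync(0xffffu, rB, k); // broadcast x[k]
        if (lane > k)
            rB -= top * rA;                  // eliminate x[k] from later rows
    }
    if (lane < N)
        x[lane] = rB;
}
// Launch with one 16-thread block per system, e.g.
//     trsv_lower_warp_sketch<<<1, 16>>>(dL, db, dx, 16);
// ---------------------------------------------------------------------------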
17f6cdc2e4f0e29e384a3ed47d1283db1d50dbee.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright 1993-2010 NVIDIA Corporation.  All rights reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation.
 * Any use, reproduction, disclosure, or distribution of this software
 * and related documentation without an express license agreement from
 * NVIDIA Corporation is strictly prohibited.
 *
 * Please refer to the applicable NVIDIA end user license agreement (EULA)
 * associated with this source code for terms and conditions that govern
 * your use of this NVIDIA software.
 *
 */

#include <prof.cu>
//Based on http://www.iti.fh-flensburg.de/lang/algorithmen/sortieren/networks/oemen.htm
#include <assert.h>
#include <cutil_inline.h>
#include "sortingNetworks_common.h"
#include "sortingNetworks_common.cuh"

////////////////////////////////////////////////////////////////////////////////
// Monolithic Batcher's sort kernel for short arrays fitting into shared memory
////////////////////////////////////////////////////////////////////////////////
__global__ void oddEvenMergeSortShared(
    uint *d_DstKey,
    uint *d_DstVal,
    uint *d_SrcKey,
    uint *d_SrcVal,
    uint arrayLength,
    uint dir
){
    //Shared memory storage for one or more small vectors
    __shared__ uint s_key[SHARED_SIZE_LIMIT];
    __shared__ uint s_val[SHARED_SIZE_LIMIT];

    //Offset to the beginning of subbatch and load data
    d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
    d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
    d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
    d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
    s_key[threadIdx.x + 0] = d_SrcKey[0];
    s_val[threadIdx.x + 0] = d_SrcVal[0];
    s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)];
    s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)];

    for(uint size = 2; size <= arrayLength; size <<= 1){
        uint stride = size / 2;
        uint offset = threadIdx.x & (stride - 1);

        {
            __syncthreads();
            uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
            Comparator(
                s_key[pos + 0], s_val[pos + 0],
                s_key[pos + stride], s_val[pos + stride],
                dir
            );
            stride >>= 1;
        }

        for(; stride > 0; stride >>= 1){
            __syncthreads();
            uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
            if(offset >= stride)
                Comparator(
                    s_key[pos - stride], s_val[pos - stride],
                    s_key[pos + 0], s_val[pos + 0],
                    dir
                );
        }
    }

    __syncthreads();
    d_DstKey[0] = s_key[threadIdx.x + 0];
    d_DstVal[0] = s_val[threadIdx.x + 0];
    d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
    d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
}

////////////////////////////////////////////////////////////////////////////////
// Odd-even merge sort iteration kernel
// for large arrays (not fitting into shared memory)
////////////////////////////////////////////////////////////////////////////////
__global__ void oddEvenMergeGlobal(
    uint *d_DstKey,
    uint *d_DstVal,
    uint *d_SrcKey,
    uint *d_SrcVal,
    uint arrayLength,
    uint size,
    uint stride,
    uint dir
){
    uint global_comparatorI = blockIdx.x * blockDim.x + threadIdx.x;

    //Odd-even merge
    uint pos = 2 * global_comparatorI - (global_comparatorI & (stride - 1));

    if(stride < size / 2){
        uint offset = global_comparatorI & ( (size / 2) - 1);

        if(offset >= stride){
            uint keyA = d_SrcKey[pos - stride];
            uint valA = d_SrcVal[pos - stride];
            uint keyB = d_SrcKey[pos + 0];
            uint valB = d_SrcVal[pos + 0];

            Comparator(
                keyA, valA,
                keyB, valB,
                dir
            );

            d_DstKey[pos - stride] = keyA;
            d_DstVal[pos - stride] = valA;
            d_DstKey[pos + 0] = keyB;
            d_DstVal[pos + 0] = valB;
        }
    }else{
        uint keyA = d_SrcKey[pos + 0];
        uint valA = d_SrcVal[pos + 0];
        uint keyB = d_SrcKey[pos + stride];
        uint valB = d_SrcVal[pos + stride];

        Comparator(
            keyA, valA,
            keyB, valB,
            dir
        );

        d_DstKey[pos + 0] = keyA;
        d_DstVal[pos + 0] = valA;
        d_DstKey[pos + stride] = keyB;
        d_DstVal[pos + stride] = valB;
    }
}

////////////////////////////////////////////////////////////////////////////////
// Interface function
////////////////////////////////////////////////////////////////////////////////
//Helper function
extern "C" uint factorRadix2(uint *log2L, uint L);

extern "C" void oddEvenMergeSort(
    uint *d_DstKey,
    uint *d_DstVal,
    uint *d_SrcKey,
    uint *d_SrcVal,
    uint batchSize,
    uint arrayLength,
    uint dir
){
    //Nothing to sort
    if(arrayLength < 2)
        return;

    //Only power-of-two array lengths are supported by this implementation
    uint log2L;
    uint factorizationRemainder = factorRadix2(&log2L, arrayLength);
    assert( factorizationRemainder == 1 );

    dir = (dir != 0);

    uint blockCount = (batchSize * arrayLength) / SHARED_SIZE_LIMIT;
    uint threadCount = SHARED_SIZE_LIMIT / 2;

    if(arrayLength <= SHARED_SIZE_LIMIT){
        assert( SHARED_SIZE_LIMIT % arrayLength == 0 );
        GpuProfiling::prepareProfiling( blockCount, threadCount );
        hipLaunchKernelGGL(( oddEvenMergeSortShared), dim3(blockCount), dim3(threadCount), 0, 0, d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, arrayLength, dir);
        GpuProfiling::addResults("oddEvenMergeSortShared");
    }else{
        GpuProfiling::prepareProfiling( blockCount, threadCount );
        hipLaunchKernelGGL(( oddEvenMergeSortShared), dim3(blockCount), dim3(threadCount), 0, 0, d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, SHARED_SIZE_LIMIT, dir);
        GpuProfiling::addResults("oddEvenMergeSortShared");

        for(uint size = 2 * SHARED_SIZE_LIMIT; size <= arrayLength; size <<= 1)
            for(unsigned stride = size / 2; stride > 0; stride >>= 1){
                //Unlike with bitonic sort, combining bitonic merge steps with
                //stride = [SHARED_SIZE_LIMIT / 2 .. 1] seems to be impossible as there are
                //dependencies between data elements crossing the SHARED_SIZE_LIMIT borders
                GpuProfiling::prepareProfiling( (batchSize * arrayLength) / 512, 256 );
                hipLaunchKernelGGL(( oddEvenMergeGlobal), dim3((batchSize * arrayLength) / 512), dim3(256), 0, 0, d_DstKey, d_DstVal, d_DstKey, d_DstVal, arrayLength, size, stride, dir);
                GpuProfiling::addResults("oddEvenMergeGlobal");
            }
    }
}
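// ---------------------------------------------------------------------------
// The .hip file above differs from its .cu twin (next file) only in the
// launch syntax hipify rewrites mechanically: CUDA's
//     kernel<<<grid, block, shmem, stream>>>(args...);
// becomes
//     hipLaunchKernelGGL((kernel), dim3(grid), dim3(block), shmem, stream, args...);
// A minimal self-contained HIP demo of that rewrite (kernel and sizes here
// are made up for illustration, not taken from the file above):
#include <hip/hip_runtime.h>
__global__ void scale(float *v, float s) {
    v[blockIdx.x * blockDim.x + threadIdx.x] *= s;
}
int main() {
    float *d;
    hipMalloc(&d, 256 * sizeof(float));
    // equivalent to CUDA: scale<<<2, 128>>>(d, 2.0f);
    hipLaunchKernelGGL((scale), dim3(2), dim3(128), 0, 0, d, 2.0f);
    hipDeviceSynchronize();
    hipFree(d);
    return 0;
}
// ---------------------------------------------------------------------------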
17f6cdc2e4f0e29e384a3ed47d1283db1d50dbee.cu
/*
 * Copyright 1993-2010 NVIDIA Corporation.  All rights reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation.
 * Any use, reproduction, disclosure, or distribution of this software
 * and related documentation without an express license agreement from
 * NVIDIA Corporation is strictly prohibited.
 *
 * Please refer to the applicable NVIDIA end user license agreement (EULA)
 * associated with this source code for terms and conditions that govern
 * your use of this NVIDIA software.
 *
 */

#include <prof.cu>
//Based on http://www.iti.fh-flensburg.de/lang/algorithmen/sortieren/networks/oemen.htm
#include <assert.h>
#include <cutil_inline.h>
#include "sortingNetworks_common.h"
#include "sortingNetworks_common.cuh"

////////////////////////////////////////////////////////////////////////////////
// Monolithic Batcher's sort kernel for short arrays fitting into shared memory
////////////////////////////////////////////////////////////////////////////////
__global__ void oddEvenMergeSortShared(
    uint *d_DstKey,
    uint *d_DstVal,
    uint *d_SrcKey,
    uint *d_SrcVal,
    uint arrayLength,
    uint dir
){
    //Shared memory storage for one or more small vectors
    __shared__ uint s_key[SHARED_SIZE_LIMIT];
    __shared__ uint s_val[SHARED_SIZE_LIMIT];

    //Offset to the beginning of subbatch and load data
    d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
    d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
    d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
    d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
    s_key[threadIdx.x + 0] = d_SrcKey[0];
    s_val[threadIdx.x + 0] = d_SrcVal[0];
    s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)];
    s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)];

    for(uint size = 2; size <= arrayLength; size <<= 1){
        uint stride = size / 2;
        uint offset = threadIdx.x & (stride - 1);

        {
            __syncthreads();
            uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
            Comparator(
                s_key[pos + 0], s_val[pos + 0],
                s_key[pos + stride], s_val[pos + stride],
                dir
            );
            stride >>= 1;
        }

        for(; stride > 0; stride >>= 1){
            __syncthreads();
            uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
            if(offset >= stride)
                Comparator(
                    s_key[pos - stride], s_val[pos - stride],
                    s_key[pos + 0], s_val[pos + 0],
                    dir
                );
        }
    }

    __syncthreads();
    d_DstKey[0] = s_key[threadIdx.x + 0];
    d_DstVal[0] = s_val[threadIdx.x + 0];
    d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
    d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
}

////////////////////////////////////////////////////////////////////////////////
// Odd-even merge sort iteration kernel
// for large arrays (not fitting into shared memory)
////////////////////////////////////////////////////////////////////////////////
__global__ void oddEvenMergeGlobal(
    uint *d_DstKey,
    uint *d_DstVal,
    uint *d_SrcKey,
    uint *d_SrcVal,
    uint arrayLength,
    uint size,
    uint stride,
    uint dir
){
    uint global_comparatorI = blockIdx.x * blockDim.x + threadIdx.x;

    //Odd-even merge
    uint pos = 2 * global_comparatorI - (global_comparatorI & (stride - 1));

    if(stride < size / 2){
        uint offset = global_comparatorI & ( (size / 2) - 1);

        if(offset >= stride){
            uint keyA = d_SrcKey[pos - stride];
            uint valA = d_SrcVal[pos - stride];
            uint keyB = d_SrcKey[pos + 0];
            uint valB = d_SrcVal[pos + 0];

            Comparator(
                keyA, valA,
                keyB, valB,
                dir
            );

            d_DstKey[pos - stride] = keyA;
            d_DstVal[pos - stride] = valA;
            d_DstKey[pos + 0] = keyB;
            d_DstVal[pos + 0] = valB;
        }
    }else{
        uint keyA = d_SrcKey[pos + 0];
        uint valA = d_SrcVal[pos + 0];
        uint keyB = d_SrcKey[pos + stride];
        uint valB = d_SrcVal[pos + stride];

        Comparator(
            keyA, valA,
            keyB, valB,
            dir
        );

        d_DstKey[pos + 0] = keyA;
        d_DstVal[pos + 0] = valA;
        d_DstKey[pos + stride] = keyB;
        d_DstVal[pos + stride] = valB;
    }
}

////////////////////////////////////////////////////////////////////////////////
// Interface function
////////////////////////////////////////////////////////////////////////////////
//Helper function
extern "C" uint factorRadix2(uint *log2L, uint L);

extern "C" void oddEvenMergeSort(
    uint *d_DstKey,
    uint *d_DstVal,
    uint *d_SrcKey,
    uint *d_SrcVal,
    uint batchSize,
    uint arrayLength,
    uint dir
){
    //Nothing to sort
    if(arrayLength < 2)
        return;

    //Only power-of-two array lengths are supported by this implementation
    uint log2L;
    uint factorizationRemainder = factorRadix2(&log2L, arrayLength);
    assert( factorizationRemainder == 1 );

    dir = (dir != 0);

    uint blockCount = (batchSize * arrayLength) / SHARED_SIZE_LIMIT;
    uint threadCount = SHARED_SIZE_LIMIT / 2;

    if(arrayLength <= SHARED_SIZE_LIMIT){
        assert( SHARED_SIZE_LIMIT % arrayLength == 0 );
        GpuProfiling::prepareProfiling( blockCount, threadCount );
        oddEvenMergeSortShared<<<blockCount, threadCount>>>(d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, arrayLength, dir);
        GpuProfiling::addResults("oddEvenMergeSortShared");
    }else{
        GpuProfiling::prepareProfiling( blockCount, threadCount );
        oddEvenMergeSortShared<<<blockCount, threadCount>>>(d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, SHARED_SIZE_LIMIT, dir);
        GpuProfiling::addResults("oddEvenMergeSortShared");

        for(uint size = 2 * SHARED_SIZE_LIMIT; size <= arrayLength; size <<= 1)
            for(unsigned stride = size / 2; stride > 0; stride >>= 1){
                //Unlike with bitonic sort, combining bitonic merge steps with
                //stride = [SHARED_SIZE_LIMIT / 2 .. 1] seems to be impossible as there are
                //dependencies between data elements crossing the SHARED_SIZE_LIMIT borders
                GpuProfiling::prepareProfiling( (batchSize * arrayLength) / 512, 256 );
                oddEvenMergeGlobal<<<(batchSize * arrayLength) / 512, 256>>>(d_DstKey, d_DstVal, d_DstKey, d_DstVal, arrayLength, size, stride, dir);
                GpuProfiling::addResults("oddEvenMergeGlobal");
            }
    }
}
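// ---------------------------------------------------------------------------
// Host-side reference of the same Batcher odd-even merge network, handy for
// validating the kernels above on small power-of-two inputs. It mirrors the
// comparator index math of oddEvenMergeGlobal exactly (same pos/offset
// formulas, n/2 comparators per stage); the function name is illustrative.
#include <vector>
#include <algorithm>
static void oddEvenMergeSortHost(std::vector<unsigned> &key, bool ascending)
{
    size_t n = key.size();                     // must be a power of two
    for (size_t size = 2; size <= n; size <<= 1) {
        for (size_t stride = size / 2; stride > 0; stride >>= 1) {
            for (size_t i = 0; i < n / 2; i++) {
                size_t pos = 2 * i - (i & (stride - 1));
                size_t lo, hi;
                if (stride >= size / 2) {      // first step of each merge
                    lo = pos; hi = pos + stride;
                } else {                       // later steps skip some lanes
                    if ((i & (size / 2 - 1)) < stride) continue;
                    lo = pos - stride; hi = pos;
                }
                // same swap condition as the SDK Comparator: swap when
                // (keyA > keyB) == dir
                if ((key[lo] > key[hi]) == ascending)
                    std::swap(key[lo], key[hi]);
            }
        }
    }
}
// ---------------------------------------------------------------------------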
940acf6f07ddfd70ada9a39e30fdbd97d0b18e39.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <cstring>   // for std::memcpy (used below, was missing)
#include <vector>    // for std::vector (used below, was missing)
#include <hip/hip_runtime.h>
#include <gmp.h>
#include <cassert>
#include "cgbn/cgbn.h"
#include "utility/support.h"

#define TPI 32
#define BITS 768
#define TPB 128  // the number of threads per block to launch (must be divisible by 32)

typedef struct {
    cgbn_mem_t<BITS> x;
    cgbn_mem_t<BITS> y;
    cgbn_mem_t<BITS> m;
    cgbn_mem_t<BITS> mul_lo;
    cgbn_mem_t<BITS> mul_hi;
} my_instance_t;

typedef struct {
    cgbn_mem_t<BITS> x;
    cgbn_mem_t<BITS> y;
    cgbn_mem_t<BITS> l;
    cgbn_mem_t<BITS> m;
    cgbn_mem_t<BITS> result;
    cgbn_mem_t<BITS> result2;
    cgbn_mem_t<BITS> mul_lo;
    cgbn_mem_t<BITS> mul_hi;
} instance_t;

typedef cgbn_context_t<TPI> context_t;
typedef cgbn_env_t<context_t, 768> env1024_t;

const uint64_t MNT4_INV = 0xf2044cfbe45e7fff;
const uint64_t MNT6_INV = 0xc90776e23fffffff;

// num is of size 2*n. modulus is of size n
// result is of size n.
void reduce_wide(mp_limb_t* result, mp_limb_t* num, mp_limb_t* modulus, uint64_t inv, int n) {
    mp_limb_t *res = num;
    // mp_limb_t res[2*n];
    // mpn_mul_n(res, this->mont_repr.data, other.data, n);

    /*
      The Montgomery reduction here is based on Algorithm 14.32 in
      Handbook of Applied Cryptography
      <http://cacr.uwaterloo.ca/hac/about/chap14.pdf>.
    */
    for (size_t i = 0; i < n; ++i) {
        mp_limb_t k = inv * res[i];
        /* calculate res = res + k * mod * b^i */
        mp_limb_t carryout = mpn_addmul_1(res+i, modulus, n, k);
        carryout = mpn_add_1(res+n+i, res+n+i, n-i, carryout);
        assert(carryout == 0);
    }

    if (mpn_cmp(res+n, modulus, n) >= 0) {
        const mp_limb_t borrow = mpn_sub(res+n, res+n, n, modulus, n);
        assert(borrow == 0);
    }

    mpn_copyi(result, res+n, n);
}

__device__ void store_np0(env1024_t::cgbn_t& l, uint32_t np0) {
#if defined(__CUDA_ARCH__)
#warning "including limbs code"
    l._limbs[10] = np0;
    l._limbs[11] = 0xe45e7fffu;
    printf("one %x, np-0 = %x\n", l._limbs[10], l._limbs[11]);
#endif
}

__global__ void my_kernel(my_instance_t *problem_instances, uint32_t instance_count) {
    context_t bn_context;                   // create a CGBN context
    env1024_t bn1024_env(bn_context);       // construct a bn environment for 1024 bit math
    env1024_t::cgbn_t a, b, m;              // three 1024-bit values (spread across a warp)
    env1024_t::cgbn_wide_t mul_wide;
    // uint32_t np0;

    int32_t my_instance=(blockIdx.x*blockDim.x + threadIdx.x)/TPI;  // determine my instance number
    if(my_instance>=instance_count) return;  // return if my_instance is not valid

    cgbn_load(bn1024_env, a, &(problem_instances[my_instance]).x);
    cgbn_load(bn1024_env, b, &(problem_instances[my_instance]).y);
    cgbn_load(bn1024_env, m, &(problem_instances[my_instance]).m);

    // np0 = -cgbn_binary_inverse_ui32(bn1024_env, cgbn_get_ui32(bn1024_env, m));
    cgbn_mul_wide(bn1024_env, mul_wide, a, b);

    cgbn_store(bn1024_env, &(problem_instances[my_instance].mul_lo), mul_wide._low);
    cgbn_store(bn1024_env, &(problem_instances[my_instance].mul_hi), mul_wide._high);
}

__global__ void add_kernel(instance_t *problem_instances, uint32_t instance_count, int add_pow_count) {
    context_t bn_context;                   // create a CGBN context
    env1024_t bn1024_env(bn_context);       // construct a bn environment for 1024 bit math
    env1024_t::cgbn_t a, b, mul_r, add_r, add_r1, add_r2, acc_r, acc_r1, acc_r2, m, l;  // three 1024-bit values (spread across a warp)
    env1024_t::cgbn_t mul_r2;
    env1024_t::cgbn_wide_t mul_wide;
    uint32_t np0;

    int32_t my_instance=(blockIdx.x*blockDim.x + threadIdx.x)/TPI;  // determine my instance number
    if(my_instance>=instance_count) return;  // return if my_instance is not valid

    cgbn_load(bn1024_env, a, &(problem_instances[my_instance]).x);
    cgbn_load(bn1024_env, b, &(problem_instances[my_instance]).y);
    cgbn_load(bn1024_env, m, &(problem_instances[my_instance]).m);
    cgbn_load(bn1024_env, l, &(problem_instances[my_instance]).l);

    // cgbn_add(bn1024_env, r, a, b);
    np0 = -cgbn_binary_inverse_ui32(bn1024_env, cgbn_get_ui32(bn1024_env, m));
#if defined(__CUDA_ARCH__)
    l._limbs[12] = np0;
#endif
    np0=cgbn_bn2mont(bn1024_env, l, l, m);
    // cgbn_bn2mont(bn1024_env, b, b, m);

    cgbn_mont_mul(bn1024_env, mul_r, a, b, m, np0);
    cgbn_mul_wide(bn1024_env, mul_wide, a, b);

    if (cgbn_compare(bn1024_env, mul_r, m) >= 0) {
        cgbn_sub(bn1024_env, add_r, mul_r, m);
        cgbn_set(bn1024_env, mul_r, add_r);
    }

    cgbn_set(bn1024_env, add_r, a);
    cgbn_set(bn1024_env, acc_r, a);
    for (int i = 0; i < add_pow_count; i ++) {
        cgbn_add(bn1024_env, add_r1, add_r, add_r);
        if (cgbn_compare(bn1024_env, add_r1, m) >= 0) {
            cgbn_sub(bn1024_env, add_r2, add_r1, m);
            cgbn_set(bn1024_env, add_r, add_r2);
        } else {
            cgbn_set(bn1024_env, add_r, add_r1);
        }
        cgbn_add(bn1024_env, acc_r1, acc_r, add_r);
        if (cgbn_compare(bn1024_env, acc_r1, m) >= 0) {
            cgbn_sub(bn1024_env, acc_r2, acc_r1, m);
            cgbn_set(bn1024_env, acc_r, acc_r2);
        } else {
            cgbn_set(bn1024_env, acc_r, acc_r1);
        }
    }
    cgbn_store(bn1024_env, &(problem_instances[my_instance].result), acc_r);

    //cgbn_mont2bn(bn1024_env, r, r, m, np0);
    // int use_r2 = cgbn_sub(bn1024_env, add_r2, add_r1, m);
    // if (use_r2 == 0) {
    // } else {
    //     cgbn_store(bn1024_env, &(problem_instances[my_instance].result), add_r1);
    // }

    cgbn_mont_reduce_wide(bn1024_env, mul_r2, mul_wide, m, 0xe45e7fffu);
    cgbn_store(bn1024_env, &(problem_instances[my_instance].result2), mul_r2);
    cgbn_store(bn1024_env, &(problem_instances[my_instance].mul_lo), mul_wide._low);
    cgbn_store(bn1024_env, &(problem_instances[my_instance].mul_hi), mul_wide._high);
}

void set_literal(cgbn_mem_t<BITS>& h, uint32_t literal, int num) {
    for (int i = 1; i < num; i ++ ) {
        h._limbs[i] = 0;
    }
    h._limbs[0] = literal;
}

void set_literal_limbs(cgbn_mem_t<BITS>& h, uint32_t literal, int num, int size) {
    for (int i = 0; i < num; i ++ ) {
        h._limbs[i] = literal;
    }
    for (int i = num; i < size; i ++ ) {
        h._limbs[i] = 0;
    }
}

void print_uint8_array(uint8_t* array, int size) {
    for (int i = 0; i < size; i ++) {
        printf("%02x", array[i]);
    }
    printf("\n");
}

uint8_t* call_mycuda(uint8_t* x, uint8_t* y, uint8_t *m, int num_bytes) {
    int count = 1;
    instance_t *gpuInstances;
    instance_t* instance_array = (instance_t*) malloc(sizeof(instance_t) * count);
    cgbn_error_report_t *report;

    // create a cgbn_error_report for CGBN to report back errors
    NEW_CUDA_CHECK(cgbn_error_report_alloc(&report));

    uint32_t* x32 = (uint32_t*) x;
    uint32_t* y32 = (uint32_t*) y;
    uint32_t* m32 = (uint32_t*) m;

    for (int i = 0; i < num_bytes / 4; i ++ ) {
        instance_array->x._limbs[i] = x32[i];
    }
    for (int i = 0; i < num_bytes / 4; i ++ ) {
        instance_array->y._limbs[i] = y32[i];
    }
    for (int i = 0; i < num_bytes / 4; i ++ ) {
        instance_array->m._limbs[i] = m32[i];
    }
    set_literal_limbs(instance_array->l, (uint32_t)0xFFFFFFFF, 2, num_bytes/4);

    printf("\n L:");
    print_uint8_array((uint8_t*) instance_array->l._limbs, num_bytes);

    printf("Copying instances to the GPU ...\n");
    NEW_CUDA_CHECK(hipSetDevice(0));
    NEW_CUDA_CHECK(hipMalloc((void **)&gpuInstances, sizeof(instance_t)*count));
    NEW_CUDA_CHECK(hipMemcpy(gpuInstances, instance_array, sizeof(instance_t)*count, hipMemcpyHostToDevice));

    hipLaunchKernelGGL(( add_kernel), dim3(1), dim3(32), 0, 0, gpuInstances, 1, 63);

    NEW_CUDA_CHECK(hipDeviceSynchronize());
    CGBN_CHECK(report);

    // copy the instances back from gpuMemory
    printf("Copying results back to CPU ...\n");
    NEW_CUDA_CHECK(hipMemcpy(instance_array, gpuInstances, sizeof(instance_t)*count, hipMemcpyDeviceToHost));

    uint8_t* result = (uint8_t*) malloc(num_bytes * sizeof(uint8_t));
    uint32_t* result32 = (uint32_t*) result;
    for (int i = 0; i < num_bytes / 4; i ++ ) {
        result32[i] = instance_array->result._limbs[i];
    }
    printf("Printing mont-mul result:");
    print_uint8_array((uint8_t*) instance_array->result2._limbs, num_bytes);
    printf("Printing mont-mul HI result:");
    print_uint8_array((uint8_t*) instance_array->mul_hi._limbs, num_bytes);
    printf("Printing mont-mul LOW result:");
    print_uint8_array((uint8_t*) instance_array->mul_lo._limbs, num_bytes);
    printf("Done. returning ...\n");

    int num_limbs = num_bytes / 8;
    printf("\n Setting num 64 limbs = %d", num_limbs);
    mp_limb_t* num = (mp_limb_t*)malloc(sizeof(mp_limb_t) * num_limbs * 2);
    std::memcpy((void*)num, (const void*)instance_array->mul_lo._limbs, num_bytes);
    std::memcpy((void*) (num + num_limbs), (const void*)instance_array->mul_hi._limbs, num_bytes);
    printf("\n Dumping 64 byte limb wide num:");
    gmp_printf("%Nx\n", num, num_limbs * 2);

    mp_limb_t* modulus = (mp_limb_t*)malloc(sizeof(mp_limb_t) * num_limbs);
    std::memcpy((void*) modulus, (const void*) instance_array->m._limbs, num_bytes);

    mp_limb_t* fresult = (mp_limb_t*)malloc(sizeof(mp_limb_t) * num_limbs);
    printf("\n Dumping 64 byte modulus:");
    gmp_printf("%Nx\n", modulus, num_limbs);

    reduce_wide(fresult, num, modulus, 0xf2044cfbe45e7fff, num_limbs);
    printf("\n Dumping 64 byte result:");
    gmp_printf("%Nx\n", fresult, num_limbs);

    free(num);
    free(modulus);
    free(fresult);
    return result;
}

std::vector<uint8_t*>* compute_newcuda(std::vector<uint8_t*> a,
                                       std::vector<uint8_t*> b,
                                       uint8_t* input_m_base,
                                       int num_bytes,
                                       uint64_t inv) {
    int num_elements = a.size();

    my_instance_t *gpuInstances;
    my_instance_t* instance_array = (my_instance_t*) malloc(sizeof(my_instance_t) * num_elements);
    cgbn_error_report_t *report;

    // create a cgbn_error_report for CGBN to report back errors
    NEW_CUDA_CHECK(cgbn_error_report_alloc(&report));

    for (int i = 0; i < num_elements; i ++) {
        std::memcpy((void*)instance_array[i].x._limbs, (const void*) a[i], num_bytes);
        std::memcpy((void*)instance_array[i].y._limbs, (const void*) b[i], num_bytes);
        std::memcpy((void*)instance_array[i].m._limbs, (const void*) input_m_base, num_bytes);
    }

    printf("Copying instances to the GPU ...\n");
    NEW_CUDA_CHECK(hipSetDevice(0));
    NEW_CUDA_CHECK(hipMalloc((void **)&gpuInstances, sizeof(my_instance_t)*num_elements));
    NEW_CUDA_CHECK(hipMemcpy(gpuInstances, instance_array, sizeof(my_instance_t)*num_elements, hipMemcpyHostToDevice));

    int tpb = TPB;
    printf("\n Threads per block =%d", tpb);
    int IPB = TPB/TPI;
    int tpi = TPI;
    printf("\n Threads per instance = %d", tpi);
    printf("\n Instances per block = %d", IPB);

    uint32_t num_blocks = (num_elements+IPB-1)/IPB;
    printf("\n Number of blocks = %d", num_blocks);

    hipLaunchKernelGGL(( my_kernel), dim3(num_blocks), dim3(TPB), 0, 0, gpuInstances, num_elements);
    NEW_CUDA_CHECK(hipDeviceSynchronize());
    CGBN_CHECK(report);

    // copy the instances back from gpuMemory
    printf("Copying results back to CPU ...\n");
    NEW_CUDA_CHECK(hipMemcpy(instance_array, gpuInstances, sizeof(my_instance_t)*num_elements, hipMemcpyDeviceToHost));

    int num_limbs = num_bytes / 8;
    printf("\n Setting num 64 limbs = %d", num_limbs);
    mp_limb_t* num = (mp_limb_t*)malloc(sizeof(mp_limb_t) * num_limbs * 2);
    mp_limb_t* modulus = (mp_limb_t*)malloc(sizeof(mp_limb_t) * num_limbs);
    std::memcpy((void*) modulus, (const void*) instance_array->m._limbs, num_bytes);
    printf("\n Dumping modulus:");
    gmp_printf("%Nx\n", modulus, num_limbs);

    std::vector<uint8_t*>* res_vector = new std::vector<uint8_t*>();
    for (int i = 0; i < num_elements; i ++) {
        // Reduce
        std::memcpy((void*)num, (const void*)instance_array[i].mul_lo._limbs, num_bytes);
        std::memcpy((void*) (num + num_limbs), (const void*)instance_array[i].mul_hi._limbs, num_bytes);
        mp_limb_t* fresult = (mp_limb_t*)malloc(sizeof(mp_limb_t) * num_limbs);
        // printf("\n Dumping 64 byte limb wide num [%d]:", i);
        // gmp_printf("%Nx\n", num, num_limbs * 2);
        reduce_wide(fresult, num, modulus, inv, num_limbs);
        // store the result.
        res_vector->emplace_back((uint8_t*)fresult);
    }

    free(num);
    free(modulus);
    free(instance_array);
    hipFree(gpuInstances);
    return res_vector;
}
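// ---------------------------------------------------------------------------
// Quick sanity check of the CGBN launch geometry used above: TPI lanes
// cooperate on one big integer, so a TPB-thread block holds TPB/TPI
// instances, and my_kernel maps global thread t to instance t/TPI. Values
// mirror the #defines in the file above; the function name is mine.
#include <stdio.h>
#define TPI 32
#define TPB 128
static unsigned blocks_for(unsigned num_elements) {
    unsigned ipb = TPB / TPI;                  // instances per block = 4
    return (num_elements + ipb - 1) / ipb;     // ceil-divide
}
int main(void) {
    printf("%u\n", blocks_for(1000));          // 250 blocks for 1000 instances
    return 0;
}
// ---------------------------------------------------------------------------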
940acf6f07ddfd70ada9a39e30fdbd97d0b18e39.cu
#include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <cuda.h> #include <gmp.h> #include <cassert> #include "cgbn/cgbn.h" #include "utility/support.h" #define TPI 32 #define BITS 768 #define TPB 128 // the number of threads per block to launch (must be divisible by 32 typedef struct { cgbn_mem_t<BITS> x; cgbn_mem_t<BITS> y; cgbn_mem_t<BITS> m; cgbn_mem_t<BITS> mul_lo; cgbn_mem_t<BITS> mul_hi; } my_instance_t; typedef struct { cgbn_mem_t<BITS> x; cgbn_mem_t<BITS> y; cgbn_mem_t<BITS> l; cgbn_mem_t<BITS> m; cgbn_mem_t<BITS> result; cgbn_mem_t<BITS> result2; cgbn_mem_t<BITS> mul_lo; cgbn_mem_t<BITS> mul_hi; } instance_t; typedef cgbn_context_t<TPI> context_t; typedef cgbn_env_t<context_t, 768> env1024_t; const uint64_t MNT4_INV = 0xf2044cfbe45e7fff; const uint64_t MNT6_INV = 0xc90776e23fffffff; // num is of size 2*n. modulus is of size n // result is of size n. void reduce_wide(mp_limb_t* result, mp_limb_t* num, mp_limb_t* modulus, uint64_t inv, int n) { mp_limb_t *res = num; // mp_limb_t res[2*n]; // mpn_mul_n(res, this->mont_repr.data, other.data, n); /* The Montgomery reduction here is based on Algorithm 14.32 in Handbook of Applied Cryptography <http://cacr.uwaterloo.ca/hac/about/chap14.pdf>. */ for (size_t i = 0; i < n; ++i) { mp_limb_t k = inv * res[i]; /* calculate res = res + k * mod * b^i */ mp_limb_t carryout = mpn_addmul_1(res+i, modulus, n, k); carryout = mpn_add_1(res+n+i, res+n+i, n-i, carryout); assert(carryout == 0); } if (mpn_cmp(res+n, modulus, n) >= 0) { const mp_limb_t borrow = mpn_sub(res+n, res+n, n, modulus, n); assert(borrow == 0); } mpn_copyi(result, res+n, n); } __device__ void store_np0(env1024_t::cgbn_t& l, uint32_t np0) { #if defined(__CUDA_ARCH__) #warning "including limbs code" l._limbs[10] = np0; l._limbs[11] = 0xe45e7fffu; printf("one %x, np-0 = %x\n", l._limbs[10], l._limbs[11]); #endif } __global__ void my_kernel(my_instance_t *problem_instances, uint32_t instance_count) { context_t bn_context; // create a CGBN context env1024_t bn1024_env(bn_context); // construct a bn environment for 1024 bit math env1024_t::cgbn_t a, b, m; // three 1024-bit values (spread across a warp) env1024_t::cgbn_wide_t mul_wide; // uint32_t np0; int32_t my_instance=(blockIdx.x*blockDim.x + threadIdx.x)/TPI; // determine my instance number if(my_instance>=instance_count) return; // return if my_instance is not valid cgbn_load(bn1024_env, a, &(problem_instances[my_instance]).x); cgbn_load(bn1024_env, b, &(problem_instances[my_instance]).y); cgbn_load(bn1024_env, m, &(problem_instances[my_instance]).m); // np0 = -cgbn_binary_inverse_ui32(bn1024_env, cgbn_get_ui32(bn1024_env, m)); cgbn_mul_wide(bn1024_env, mul_wide, a, b); cgbn_store(bn1024_env, &(problem_instances[my_instance].mul_lo), mul_wide._low); cgbn_store(bn1024_env, &(problem_instances[my_instance].mul_hi), mul_wide._high); } __global__ void add_kernel(instance_t *problem_instances, uint32_t instance_count, int add_pow_count) { context_t bn_context; // create a CGBN context env1024_t bn1024_env(bn_context); // construct a bn environment for 1024 bit math env1024_t::cgbn_t a, b, mul_r, add_r, add_r1, add_r2, acc_r, acc_r1, acc_r2, m, l; // three 1024-bit values (spread across a warp) env1024_t::cgbn_t mul_r2; env1024_t::cgbn_wide_t mul_wide; uint32_t np0; int32_t my_instance=(blockIdx.x*blockDim.x + threadIdx.x)/TPI; // determine my instance number if(my_instance>=instance_count) return; // return if my_instance is not valid cgbn_load(bn1024_env, a, &(problem_instances[my_instance]).x); cgbn_load(bn1024_env, b, 
&(problem_instances[my_instance]).y); cgbn_load(bn1024_env, m, &(problem_instances[my_instance]).m); cgbn_load(bn1024_env, l, &(problem_instances[my_instance]).l); // cgbn_add(bn1024_env, r, a, b); np0 = -cgbn_binary_inverse_ui32(bn1024_env, cgbn_get_ui32(bn1024_env, m)); #if defined(__CUDA_ARCH__) l._limbs[12] = np0; #endif np0=cgbn_bn2mont(bn1024_env, l, l, m); // cgbn_bn2mont(bn1024_env, b, b, m); cgbn_mont_mul(bn1024_env, mul_r, a, b, m, np0); cgbn_mul_wide(bn1024_env, mul_wide, a, b); if (cgbn_compare(bn1024_env, mul_r, m) >= 0) { cgbn_sub(bn1024_env, add_r, mul_r, m); cgbn_set(bn1024_env, mul_r, add_r); } cgbn_set(bn1024_env, add_r, a); cgbn_set(bn1024_env, acc_r, a); for (int i = 0; i < add_pow_count; i ++) { cgbn_add(bn1024_env, add_r1, add_r, add_r); if (cgbn_compare(bn1024_env, add_r1, m) >= 0) { cgbn_sub(bn1024_env, add_r2, add_r1, m); cgbn_set(bn1024_env, add_r, add_r2); } else { cgbn_set(bn1024_env, add_r, add_r1); } cgbn_add(bn1024_env, acc_r1, acc_r, add_r); if (cgbn_compare(bn1024_env, acc_r1, m) >= 0) { cgbn_sub(bn1024_env, acc_r2, acc_r1, m); cgbn_set(bn1024_env, acc_r, acc_r2); } else { cgbn_set(bn1024_env, acc_r, acc_r1); } } cgbn_store(bn1024_env, &(problem_instances[my_instance].result), acc_r); //cgbn_mont2bn(bn1024_env, r, r, m, np0); // int use_r2 = cgbn_sub(bn1024_env, add_r2, add_r1, m); // if (use_r2 == 0) { // } else { // cgbn_store(bn1024_env, &(problem_instances[my_instance].result), add_r1); // } cgbn_mont_reduce_wide(bn1024_env, mul_r2, mul_wide, m, 0xe45e7fffu); cgbn_store(bn1024_env, &(problem_instances[my_instance].result2), mul_r2); cgbn_store(bn1024_env, &(problem_instances[my_instance].mul_lo), mul_wide._low); cgbn_store(bn1024_env, &(problem_instances[my_instance].mul_hi), mul_wide._high); } void set_literal(cgbn_mem_t<BITS>& h, uint32_t literal, int num) { for (int i = 1; i < num; i ++ ) { h._limbs[i] = 0; } h._limbs[0] = literal; } void set_literal_limbs(cgbn_mem_t<BITS>& h, uint32_t literal, int num, int size) { for (int i = 0; i < num; i ++ ) { h._limbs[i] = literal; } for (int i = num; i < size; i ++ ) { h._limbs[i] = 0; } } void print_uint8_array(uint8_t* array, int size) { for (int i = 0; i < size; i ++) { printf("%02x", array[i]); } printf("\n"); } uint8_t* call_mycuda(uint8_t* x, uint8_t* y, uint8_t *m, int num_bytes) { int count = 1; instance_t *gpuInstances; instance_t* instance_array = (instance_t*) malloc(sizeof(instance_t) * count); cgbn_error_report_t *report; // create a cgbn_error_report for CGBN to report back errors NEW_CUDA_CHECK(cgbn_error_report_alloc(&report)); uint32_t* x32 = (uint32_t*) x; uint32_t* y32 = (uint32_t*) y; uint32_t* m32 = (uint32_t*) m; for (int i = 0; i < num_bytes / 4; i ++ ) { instance_array->x._limbs[i] = x32[i]; } for (int i = 0; i < num_bytes / 4; i ++ ) { instance_array->y._limbs[i] = y32[i]; } for (int i = 0; i < num_bytes / 4; i ++ ) { instance_array->m._limbs[i] = m32[i]; } set_literal_limbs(instance_array->l, (uint32_t)0xFFFFFFFF, 2, num_bytes/4); printf("\n L:"); print_uint8_array((uint8_t*) instance_array->l._limbs, num_bytes); printf("Copying instances to the GPU ...\n"); NEW_CUDA_CHECK(cudaSetDevice(0)); NEW_CUDA_CHECK(cudaMalloc((void **)&gpuInstances, sizeof(instance_t)*count)); NEW_CUDA_CHECK(cudaMemcpy(gpuInstances, instance_array, sizeof(instance_t)*count, cudaMemcpyHostToDevice)); add_kernel<<<1, 32>>>(gpuInstances, 1, 63); NEW_CUDA_CHECK(cudaDeviceSynchronize()); CGBN_CHECK(report); // copy the instances back from gpuMemory printf("Copying results back to CPU ...\n"); 
NEW_CUDA_CHECK(cudaMemcpy(instance_array, gpuInstances, sizeof(instance_t)*count, cudaMemcpyDeviceToHost)); uint8_t* result = (uint8_t*) malloc(num_bytes * sizeof(uint8_t)); uint32_t* result32 = (uint32_t*) result; for (int i = 0; i < num_bytes / 4; i ++ ) { result32[i] = instance_array->result._limbs[i]; } printf("Printing mont-mul result:"); print_uint8_array((uint8_t*) instance_array->result2._limbs, num_bytes); printf("Printing mont-mul HI result:"); print_uint8_array((uint8_t*) instance_array->mul_hi._limbs, num_bytes); printf("Printing mont-mul LOW result:"); print_uint8_array((uint8_t*) instance_array->mul_lo._limbs, num_bytes); printf("Done. returning ...\n"); int num_limbs = num_bytes / 8; printf("\n Setting num 64 limbs = %d", num_limbs); mp_limb_t* num = (mp_limb_t*)malloc(sizeof(mp_limb_t) * num_limbs * 2); std::memcpy((void*)num, (const void*)instance_array->mul_lo._limbs, num_bytes); std::memcpy((void*) (num + num_limbs), (const void*)instance_array->mul_hi._limbs, num_bytes); printf("\n Dumping 64 byte limb wide num:"); gmp_printf("%Nx\n", num, num_limbs * 2); mp_limb_t* modulus = (mp_limb_t*)malloc(sizeof(mp_limb_t) * num_limbs); std::memcpy((void*) modulus, (const void*) instance_array->m._limbs, num_bytes); mp_limb_t* fresult = (mp_limb_t*)malloc(sizeof(mp_limb_t) * num_limbs); printf("\n Dumping 64 byte modulus:"); gmp_printf("%Nx\n", m, num_limbs); reduce_wide(fresult, num, modulus, 0xf2044cfbe45e7fff, num_limbs); printf("\n Dumping 64 byte result:"); gmp_printf("%Nx\n", fresult, num_limbs); free(num); free(modulus); free(fresult); return result; } std::vector<uint8_t*>* compute_newcuda(std::vector<uint8_t*> a, std::vector<uint8_t*> b, uint8_t* input_m_base, int num_bytes, uint64_t inv) { int num_elements = a.size(); my_instance_t *gpuInstances; my_instance_t* instance_array = (my_instance_t*) malloc(sizeof(my_instance_t) * num_elements); cgbn_error_report_t *report; // create a cgbn_error_report for CGBN to report back errors NEW_CUDA_CHECK(cgbn_error_report_alloc(&report)); for (int i = 0; i < num_elements; i ++) { std::memcpy((void*)instance_array[i].x._limbs, (const void*) a[i], num_bytes); std::memcpy((void*)instance_array[i].y._limbs, (const void*) b[i], num_bytes); std::memcpy((void*)instance_array[i].m._limbs, (const void*) input_m_base, num_bytes); } printf("Copying instances to the GPU ...\n"); NEW_CUDA_CHECK(cudaSetDevice(0)); NEW_CUDA_CHECK(cudaMalloc((void **)&gpuInstances, sizeof(my_instance_t)*num_elements)); NEW_CUDA_CHECK(cudaMemcpy(gpuInstances, instance_array, sizeof(my_instance_t)*num_elements, cudaMemcpyHostToDevice)); int tpb = TPB; printf("\n Threads per block =%d", tpb); int IPB = TPB/TPI; int tpi = TPI; printf("\n Threads per instance = %d", tpi); printf("\n Instances per block = %d", IPB); uint32_t num_blocks = (num_elements+IPB-1)/IPB; printf("\n Number of blocks = %d", num_blocks); my_kernel<<<num_blocks, TPB>>>(gpuInstances, num_elements); NEW_CUDA_CHECK(cudaDeviceSynchronize()); CGBN_CHECK(report); // copy the instances back from gpuMemory printf("Copying results back to CPU ...\n"); NEW_CUDA_CHECK(cudaMemcpy(instance_array, gpuInstances, sizeof(my_instance_t)*num_elements, cudaMemcpyDeviceToHost)); int num_limbs = num_bytes / 8; printf("\n Setting num 64 limbs = %d", num_limbs); mp_limb_t* num = (mp_limb_t*)malloc(sizeof(mp_limb_t) * num_limbs * 2); mp_limb_t* modulus = (mp_limb_t*)malloc(sizeof(mp_limb_t) * num_limbs); std::memcpy((void*) modulus, (const void*) instance_array->m._limbs, num_bytes); printf("\n Dumping modulus:"); 
gmp_printf("%Nx\n", modulus, num_limbs); std::vector<uint8_t*>* res_vector = new std::vector<uint8_t*>(); for (int i = 0; i < num_elements; i ++) { // Reduce std::memcpy((void*)num, (const void*)instance_array[i].mul_lo._limbs, num_bytes); std::memcpy((void*) (num + num_limbs), (const void*)instance_array[i].mul_hi._limbs, num_bytes); mp_limb_t* fresult = (mp_limb_t*)malloc(sizeof(mp_limb_t) * num_limbs); // printf("\n Dumping 64 byte limb wide num [%d]:", i); // gmp_printf("%Nx\n", num, num_limbs * 2); reduce_wide(fresult, num, modulus, inv, num_limbs); // store the result. res_vector->emplace_back((uint8_t*)fresult); } free(num); free(modulus); free(instance_array); cudaFree(gpuInstances); return res_vector; }
0eada1b6bf058b348ce2b50a332412d05d78fd19.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#foreach( $degs in $degrees )
// P(X)/Q(X) = a_0 + a_1*X + a_2*X^2 + ... + a_n*X^n / 1 + |b_0||X| + |b_1||X|^2 + ... + |b_i||X|^{i+1}
#set( $degs_a = $degs[0] )
#set( $degs_b = $degs[1] )
#set( $coefs_a = $degs_a )
#set( $coefs_b = $degs_b - 1 )
#set( $a_counts = $coefs_a + 1 )
#set( $b_counts = $coefs_b + 1 )
#set( $max_x = $degs[2] )

template <typename scalar_t>
__global__ void rational_cuda_forward_A_kernel_$degs[0]_$degs[1]( const scalar_t* __restrict__ x, const scalar_t* __restrict__ a, const scalar_t* __restrict__ b, scalar_t* __restrict__ result, size_t x_size) {
    #foreach( $idx in [0..$coefs_a] )
    scalar_t a_$idx = a[$idx];
    #end
    #foreach( $idx in [0..$coefs_b] )
    scalar_t ab_$idx = abs(b[$idx]);
    #end
    for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < x_size; index += blockDim.x * gridDim.x){
        scalar_t xp1 = x[index];
        #foreach( $idx in [2..$max_x] )#set( $value = $idx - 1 )
        scalar_t xp$idx = xp$value * xp1;
        #end
        #foreach( $idx in [1..$degs_b] )
        scalar_t axp$idx = abs(xp$idx);
        #end
        scalar_t P = a_0
        #foreach( $idx in [1..$coefs_a] )
        + a_$idx * xp$idx
        #end
        ;
        scalar_t Q = scalar_t(1.0)
        #foreach( $idx in [0..$coefs_b] )#set( $value = $idx + 1 )
        + ab_$idx * axp$value
        #end
        ;
        result[index] = P / Q;
    }
}

at::Tensor rational_cuda_forward_A_$degs[0]_$degs[1](torch::Tensor x, torch::Tensor n, torch::Tensor d){
    auto result = at::empty_like(x);
    const auto x_size = x.numel();
    int blockSize = THREADS_PER_BLOCK;
    int numBlocks = (x_size + blockSize - 1) / blockSize;

    AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "rational_cuda_forward_A_$degs[0]_$degs[1]", ([&] {
    hipLaunchKernelGGL((rational_cuda_forward_A_kernel_$degs[0]_$degs[1]<scalar_t>), dim3(numBlocks), dim3(blockSize), 0, 0,
        x.data_ptr<scalar_t>(),
        n.data_ptr<scalar_t>(),
        d.data_ptr<scalar_t>(),
        result.data_ptr<scalar_t>(),
        x_size);
    }));

    return result;
}

//P(X) = a_0 + a_1*X + a_2*X^2 ...
//Q(X) = 1 + |b_0||X| + |b_1||X|^2 + |b_2||X|^3
//R(X) = a_1 + 2*a_2*X + 3*a_3*X ...
//S(X) = sign(X) * ( |b_0| + 2|b_1||X| + 3|b_2||X|^2 ...)
//dF/dx = (-P(X)/Q(X)^2)*S(X) + R(X)/Q(X)
//dF/da_i = x^i/Q(X), i \in {0,$degs[0]}
//dF/db_i = (-P(X)/Q(X)^2) * sign(b_i) * |X^{i+1}| , i \in {0,$degs[1]}

template <typename scalar_t>
__global__ void rational_cuda_backward_A_kernel_$degs[0]_$degs[1](
    const scalar_t* __restrict__ grad_output,
    const scalar_t* __restrict__ x,
    const scalar_t* __restrict__ a,
    const scalar_t* __restrict__ b,
    scalar_t* __restrict__ d_x,
    double* __restrict__ d_a,
    double* __restrict__ d_b,
    size_t x_size) {

    __shared__ double sda[$a_counts];
    __shared__ double sdb[$b_counts];

    if( threadIdx.x == 0){
        #foreach( $idx in [0..$coefs_a] )
        sda[$idx] = 0;
        #end
        #foreach( $idx in [0..$coefs_b] )
        sdb[$idx] = 0;
        #end
    }

    __syncthreads();
    #foreach( $idx in [0..$coefs_a] )
    scalar_t d_a$idx = 0;
    scalar_t a_$idx = a[$idx];
    #end
    #foreach( $idx in [0..$coefs_b] )
    scalar_t d_b$idx = 0;
    scalar_t b_$idx = b[$idx];
    scalar_t ab_$idx = abs(b_$idx);
    #end

    for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < x_size; index += blockDim.x * gridDim.x) {
        scalar_t xp1 = x[index];
        scalar_t axp1 = abs(xp1);
        #foreach( $idx in [2..$max_x] )#set( $value = $idx - 1 )
        scalar_t xp$idx = xp$value * xp1;
        scalar_t axp$idx = abs(xp$idx);
        #end

        scalar_t P = a_0
        #foreach( $idx in [1..$coefs_a] )
        + a_$idx*xp$idx
        #end
        ;
        scalar_t Q = scalar_t(1.0)
        #foreach( $idx in [0..$coefs_b] )#set( $value = $idx + 1 )
        + ab_$idx * axp$value
        #end
        ;
        scalar_t R = a_1
        #foreach( $idx in [2..$coefs_a] )#set( $value = $idx - 1 )
        + scalar_t($idx.0) * a_$idx * xp$value
        #end
        ;
        scalar_t S = copysign( scalar_t(1.0), xp1 ) * (ab_0
        #foreach( $idx in [1..$coefs_b] )#set( $value = $idx + 1 )
        + scalar_t($value.0) * ab_$idx * axp$idx
        #end
        );

        scalar_t mpq2 = -P/(Q*Q);

        scalar_t grad_o = grad_output[index];

        scalar_t d_i_x = (R/Q + S*mpq2);
        d_x[index] = d_i_x * grad_o;

        #foreach( $idx in [0..$coefs_b] )#set( $value = $idx + 1 )
        scalar_t d_i_b$idx = mpq2 * copysign( scalar_t(1.0), b_$idx ) * axp$value;
        d_b$idx += d_i_b$idx * grad_o;
        #end

        scalar_t d_i_a0 = scalar_t(1.0)/Q;
        d_a0 += d_i_a0 * grad_o;
        #foreach( $idx in [1..$coefs_a] )
        scalar_t d_i_a$idx = xp$idx/Q;
        d_a$idx += d_i_a$idx * grad_o;
        #end
    }

    #foreach( $idx in [0..$coefs_a] )
    atomicAdd(&sda[$idx], d_a$idx);
    #end
    #foreach( $idx in [0..$coefs_b] )
    atomicAdd(&sdb[$idx], d_b$idx);
    #end

    __syncthreads();

    if( threadIdx.x == 0){
        #foreach( $idx in [0..$coefs_a] )
        atomicAdd(&d_a[$idx], sda[$idx]);
        #end
        #foreach( $idx in [0..$coefs_b] )
        atomicAdd(&d_b[$idx], sdb[$idx]);
        #end
    }
}

std::vector<torch::Tensor> rational_cuda_backward_A_$degs[0]_$degs[1](torch::Tensor grad_output, torch::Tensor x, torch::Tensor n, torch::Tensor d){
    const auto x_size = x.numel();
    auto d_x = at::empty_like(x);
    auto d_n = at::zeros_like(n).toType(at::kDouble);
    auto d_d = at::zeros_like(d).toType(at::kDouble);
    int blockSize = THREADS_PER_BLOCK;

    AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "rational_cuda_backward_A_$degs[0]_$degs[1]", ([&] {
    hipLaunchKernelGGL((rational_cuda_backward_A_kernel_$degs[0]_$degs[1]<scalar_t>), dim3(16), dim3(blockSize), 0, 0,
        grad_output.data_ptr<scalar_t>(),
        x.data_ptr<scalar_t>(),
        n.data_ptr<scalar_t>(),
        d.data_ptr<scalar_t>(),
        d_x.data_ptr<scalar_t>(),
        d_n.data_ptr<double>(),
        d_d.data_ptr<double>(),
        x_size);
    }));

    return {d_x, d_n.toType(at::kFloat), d_d.toType(at::kFloat)};
}
#end
0eada1b6bf058b348ce2b50a332412d05d78fd19.cu
#foreach( $degs in $degrees ) // P(X)/Q(X) = a_0 + a_1*X + a_2*X^2 + ... + a_n*X^n / 1 + |b_0||X| + |b_1||X|^2 + ... + |b_i||X|^{i+1} #set( $degs_a = $degs[0] ) #set( $degs_b = $degs[1] ) #set( $coefs_a = $degs_a ) #set( $coefs_b = $degs_b - 1 ) #set( $a_counts = $coefs_a + 1 ) #set( $b_counts = $coefs_b + 1 ) #set( $max_x = $degs[2] ) template <typename scalar_t> __global__ void rational_cuda_forward_A_kernel_$degs[0]_$degs[1]( const scalar_t* __restrict__ x, const scalar_t* __restrict__ a, const scalar_t* __restrict__ b, scalar_t* __restrict__ result, size_t x_size) { #foreach( $idx in [0..$coefs_a] ) scalar_t a_$idx = a[$idx]; #end #foreach( $idx in [0..$coefs_b] ) scalar_t ab_$idx = abs(b[$idx]); #end for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < x_size; index += blockDim.x * gridDim.x){ scalar_t xp1 = x[index]; #foreach( $idx in [2..$max_x] )#set( $value = $idx - 1 ) scalar_t xp$idx = xp$value * xp1; #end #foreach( $idx in [1..$degs_b] ) scalar_t axp$idx = abs(xp$idx); #end scalar_t P = a_0 #foreach( $idx in [1..$coefs_a] ) + a_$idx * xp$idx #end ; scalar_t Q = scalar_t(1.0) #foreach( $idx in [0..$coefs_b] )#set( $value = $idx + 1 ) + ab_$idx * axp$value #end ; result[index] = P / Q; } } at::Tensor rational_cuda_forward_A_$degs[0]_$degs[1](torch::Tensor x, torch::Tensor n, torch::Tensor d){ auto result = at::empty_like(x); const auto x_size = x.numel(); int blockSize = THREADS_PER_BLOCK; int numBlocks = (x_size + blockSize - 1) / blockSize; AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "rational_cuda_forward_A_$degs[0]_$degs[1]", ([&] { rational_cuda_forward_A_kernel_$degs[0]_$degs[1]<scalar_t> <<<numBlocks, blockSize>>>( x.data_ptr<scalar_t>(), n.data_ptr<scalar_t>(), d.data_ptr<scalar_t>(), result.data_ptr<scalar_t>(), x_size); })); return result; } //P(X) = a_0 + a_1*X + a_2*X^2 ... //Q(X) = 1 + |b_0||X| + |b_1||X|^2 + |b_2||X|^3 //R(X) = a_1 + 2*a_2*X + 3*a_3*X ... //S(X) = sign(X) * ( |b_0| + 2|b_1||X| + 3|b_2||X|^2 ...) 
//dF/dx = (-P(X)/Q(X)^2)*S(X) + R(X)/Q(X) //dF/da_i = x^i/Q(X), i \in {0,$degs[0]} //dF/db_i = (-P(X)/Q(X)^2) * sign(b_i) * |X^{i+1}| , i \in {0,$degs[1]} template <typename scalar_t> __global__ void rational_cuda_backward_A_kernel_$degs[0]_$degs[1]( const scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ x, const scalar_t* __restrict__ a, const scalar_t* __restrict__ b, scalar_t* __restrict__ d_x, double* __restrict__ d_a, double* __restrict__ d_b, size_t x_size) { __shared__ double sda[$a_counts]; __shared__ double sdb[$b_counts]; if( threadIdx.x == 0){ #foreach( $idx in [0..$coefs_a] ) sda[$idx] = 0; #end #foreach( $idx in [0..$coefs_b] ) sdb[$idx] = 0; #end } __syncthreads(); #foreach( $idx in [0..$coefs_a] ) scalar_t d_a$idx = 0; scalar_t a_$idx = a[$idx]; #end #foreach( $idx in [0..$coefs_b] ) scalar_t d_b$idx = 0; scalar_t b_$idx = b[$idx]; scalar_t ab_$idx = abs(b_$idx); #end for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < x_size; index += blockDim.x * gridDim.x) { scalar_t xp1 = x[index]; scalar_t axp1 = abs(xp1); #foreach( $idx in [2..$max_x] )#set( $value = $idx - 1 ) scalar_t xp$idx = xp$value * xp1; scalar_t axp$idx = abs(xp$idx); #end scalar_t P = a_0 #foreach( $idx in [1..$coefs_a] ) + a_$idx*xp$idx #end ; scalar_t Q = scalar_t(1.0) #foreach( $idx in [0..$coefs_b] )#set( $value = $idx + 1 ) + ab_$idx * axp$value #end ; scalar_t R = a_1 #foreach( $idx in [2..$coefs_a] )#set( $value = $idx - 1 ) + scalar_t($idx.0) * a_$idx * xp$value #end ; scalar_t S = copysign( scalar_t(1.0), xp1 ) * (ab_0 #foreach( $idx in [1..$coefs_b] )#set( $value = $idx + 1 ) + scalar_t($value.0) * ab_$idx * axp$idx #end ); scalar_t mpq2 = -P/(Q*Q); scalar_t grad_o = grad_output[index]; scalar_t d_i_x = (R/Q + S*mpq2); d_x[index] = d_i_x * grad_o; #foreach( $idx in [0..$coefs_b] )#set( $value = $idx + 1 ) scalar_t d_i_b$idx = mpq2 * copysign( scalar_t(1.0), b_$idx ) * axp$value; d_b$idx += d_i_b$idx * grad_o; #end scalar_t d_i_a0 = scalar_t(1.0)/Q; d_a0 += d_i_a0 * grad_o; #foreach( $idx in [1..$coefs_a] ) scalar_t d_i_a$idx = xp$idx/Q; d_a$idx += d_i_a$idx * grad_o; #end } #foreach( $idx in [0..$coefs_a] ) atomicAdd(&sda[$idx], d_a$idx); #end #foreach( $idx in [0..$coefs_b] ) atomicAdd(&sdb[$idx], d_b$idx); #end __syncthreads(); if( threadIdx.x == 0){ #foreach( $idx in [0..$coefs_a] ) atomicAdd(&d_a[$idx], sda[$idx]); #end #foreach( $idx in [0..$coefs_b] ) atomicAdd(&d_b[$idx], sdb[$idx]); #end } } std::vector<torch::Tensor> rational_cuda_backward_A_$degs[0]_$degs[1](torch::Tensor grad_output, torch::Tensor x, torch::Tensor n, torch::Tensor d){ const auto x_size = x.numel(); auto d_x = at::empty_like(x); auto d_n = at::zeros_like(n).toType(at::kDouble); auto d_d = at::zeros_like(d).toType(at::kDouble); int blockSize = THREADS_PER_BLOCK; AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "rational_cuda_backward_A_$degs[0]_$degs[1]", ([&] { rational_cuda_backward_A_kernel_$degs[0]_$degs[1]<scalar_t> <<<16, blockSize>>>( grad_output.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), n.data_ptr<scalar_t>(), d.data_ptr<scalar_t>(), d_x.data_ptr<scalar_t>(), d_n.data_ptr<double>(), d_d.data_ptr<double>(), x_size); })); return {d_x, d_n.toType(at::kFloat), d_d.toType(at::kFloat)}; } #end
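// Hedged illustration (assuming standard hipify-perl semantics, not part of
// the original file): a templated CUDA launch of the form
//     kernel_name<scalar_t><<<numBlocks, blockSize>>>(args...);
// maps to the portable HIP form
//     hipLaunchKernelGGL((kernel_name<scalar_t>), dim3(numBlocks), dim3(blockSize),
//                        0 /*dynamic shared mem*/, 0 /*stream*/, args...);
// Velocity placeholders such as $degs[1] embedded in the kernel name straddle
// that purely textual rewrite, which is why generated .hip files of templated
// template-engine sources deserve a manual review.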
90cbef594fdead6d008a8cd4673c4d0b12361e6d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "stdafx.h" #include "cuda_loss_function.h" #include "core/cuda_platform.h" using namespace np::engine::loss; using namespace np::engine::loss::cuda; using namespace np::core::cuda; using namespace std; LossFunction* LossFunction::CreateInstanceCUDA(core::cuda::CudaInstance* cuda_instance, _loss_type type, bool read_label_for_target) { if (cuda_instance == NULL) { DEBUG_OUTPUT(L"no cuda instance"); return NULL; } switch (type) { case _loss_type::CrossEntropy: return new cuda::CrossEntropy(cuda_instance, read_label_for_target); case _loss_type::CrossEntropyMulticlass: return new cuda::CrossEntropyMulticlass(cuda_instance, read_label_for_target); default: // case nsas::_loss_type::MSE: return new cuda::MSE(cuda_instance, read_label_for_target); } } CUDALossFunction::CUDALossFunction(core::cuda::CudaInstance* cuda_instance, bool read_label_for_target) : LossFunction(core::math_device_type::cuda, cuda_instance, read_label_for_target) { } CUDALossFunction::~CUDALossFunction() { } neuron_error CUDALossFunction::CalcLoss(neuro_u32 batch_size, neuro_u32 value_size, const neuro_float* output, const void* target) { _NEURO_TENSOR_DATA gpu_loss_buffer(core::math_device_type::cuda, true); gpu_loss_buffer.Alloc(batch_size, value_size); if (!CalcLossVector(batch_size, value_size, output, target, gpu_loss_buffer.GetBuffer())) { DEBUG_OUTPUT(L"failed CalcLossVector"); return -1; } neuro_float sum_loss; if (!sum(gpu_loss_buffer.GetSize(), gpu_loss_buffer.GetBuffer(), sum_loss)) { DEBUG_OUTPUT(L"failed sum"); return -1; } return sum_loss; } inline __device__ neuro_float GetTargetValueFromLabel(neuro_u32 value_size, neuro_u32 index, const neuro_u32* label_vector) { return label_vector[index / value_size] == (index % value_size) ? 1.f : 0.f; } __global__ void MSECalcLoss(neuro_u32 N, neuro_u32 value_size, const neuro_float* output, const void* target, const bool is_label, neuro_float* loss) { CUDA_KERNEL_LOOP(i, N) { const neuro_float target_value = is_label ? GetTargetValueFromLabel(value_size, i, (const neuro_u32*)target) : ((neuro_float*)target)[i]; loss[i] = (output[i] - target_value) * (output[i] - target_value) / 2; } } bool MSE::CalcLossVector(neuro_u32 batch_size, neuro_u32 value_size, const neuro_float* output, const void* target, neuro_float* loss) { const neuro_u32 N = batch_size * value_size; MSECalcLoss << <CudaPlatform::GetCudaBlockCount(N), CudaPlatform::threadsPerBlock >> > (N, value_size, output, target, m_read_label_for_target, loss); return CudaPlatform::CudaErrorCheck(hipPeekAtLastError()); } __global__ void MSECalcDiff(neuro_u32 N, neuro_u32 value_size, const neuro_float* output, const void* target, const bool is_label, neuro_float scale, neuro_float* diff) { CUDA_KERNEL_LOOP(i, N) { const neuro_float target_value = is_label ? 
GetTargetValueFromLabel(value_size, i, (const neuro_u32*)target) : ((neuro_float*)target)[i];

        diff[i] = scale * (output[i] - target_value);
    }
}

bool MSE::CalcDiff(neuro_u32 batch_size, neuro_u32 value_size, const neuro_float* output, const void* target, neuro_float* diff)
{
    const neuron_error scale = normalize_factor(batch_size);

    const neuro_u32 N = batch_size * value_size;
    MSECalcDiff << <CudaPlatform::GetCudaBlockCount(N), CudaPlatform::threadsPerBlock >> > (N, value_size, output, target, m_read_label_for_target, scale, diff);

#if 0//defined(_DEBUG)
    void* temp=malloc(batch_size*value_size*4);
    core::CPU_MemoryManager cpu;
    DEBUG_OUTPUT(L"output");
    cpu.Memcpy(temp, output, batch_size * value_size * 4, core::math_device_type::cuda);
    NP_Util::DebugOutputValues((neuro_float*)temp, batch_size*value_size, 10);

    DEBUG_OUTPUT(L"diff");
    cpu.Memcpy(temp, diff, batch_size * value_size * 4, core::math_device_type::cuda);
    NP_Util::DebugOutputValues((neuro_float*)temp, batch_size*value_size, 10);

    DEBUG_OUTPUT(L"label");
    cpu.Memcpy(temp, target, batch_size * 4, core::math_device_type::cuda);
    NP_Util::DebugOutputValues((neuro_u32*)temp, batch_size, 10);
    free(temp);
#endif
    return CudaPlatform::CudaErrorCheck(hipPeekAtLastError());
}

//#define _LOG_THRESHOLD 1e-20

// Hmm.. cross entropy still needs a closer look...
CrossEntropy::CrossEntropy(core::cuda::CudaInstance* cuda_instance, bool read_label_for_target)
    : CUDALossFunction(cuda_instance, read_label_for_target)
{
}

__global__ void CrossEntropyLoss(neuro_u32 N, neuro_u32 value_size, const neuro_float* output, const void* target, const bool is_label, neuro_float* loss)
{
    // use FLT_MIN because output values of 0 or 1 break the log terms
    CUDA_KERNEL_LOOP(i, N)
    {
        const neuro_float target_value = is_label ?
            GetTargetValueFromLabel(value_size, i, (const neuro_u32*)target) : ((neuro_float*)target)[i];

        loss[i] = -target_value * log(max(output[i], FLT_MIN))
            - (neuro_float(1) - target_value) * log(max(neuro_float(1) - output[i], FLT_MIN));
    }
}

bool CrossEntropy::CalcLossVector(neuro_u32 batch_size, neuro_u32 value_size, const neuro_float* output, const void* target, neuro_float* loss)
{
    CrossEntropyLoss << <CudaPlatform::GetCudaBlockCount(batch_size*value_size), CudaPlatform::threadsPerBlock >> > (batch_size*value_size, value_size, output, target, m_read_label_for_target, loss);
    return CudaPlatform::CudaErrorCheck(hipPeekAtLastError());
}

__global__ void CrossEntropyDiff(neuro_u32 N, neuro_u32 value_size, const neuro_float* output, const void* target, const bool is_label, neuro_float scale, neuro_float* diff)
{
    CUDA_KERNEL_LOOP(i, N)
    {
        const neuro_float target_value = is_label ?
GetTargetValueFromLabel(value_size, i, (const neuro_u32*)target) : ((neuro_float*)target)[i];

        neuro_float prob = max(output[i], FLT_MIN);
        diff[i] = scale * target_value / prob;
    }
}

bool CrossEntropy::CalcDiff(neuro_u32 batch_size, neuro_u32 value_size, const neuro_float* output, const void* target, neuro_float* diff)
{
    const neuron_error scale = -normalize_factor(batch_size);

    const neuro_u32 N = batch_size * value_size;
    CrossEntropyDiff << <CudaPlatform::GetCudaBlockCount(N), CudaPlatform::threadsPerBlock >> > (N, value_size, output, target, m_read_label_for_target, scale, diff);
    return CudaPlatform::CudaErrorCheck(hipPeekAtLastError());
}

CrossEntropyMulticlass::CrossEntropyMulticlass(core::cuda::CudaInstance* cuda_instance, bool read_label_for_target)
    : CUDALossFunction(cuda_instance, read_label_for_target)
{
}

__global__ void CrossEntropyMulticlassLoss(neuro_u32 N, neuro_u32 value_size, const neuro_float* output, const void* target, bool is_label, neuro_float* loss)
{
    // use FLT_MIN because an output of 0 breaks log()
    CUDA_KERNEL_LOOP(sample, N)
    {
        neuro_u32 label = 0;
        if (is_label)
        {
            label = ((neuro_u32*)target)[sample];
        }
        else
        {
            const neuro_float* target_p = (neuro_float*)target + sample*value_size;
            for (neuro_u32 t_index = 1; t_index < value_size; t_index++)
            {
                if (target_p[t_index]>target_p[label])
                    label = t_index;
            }
        }
        loss[sample] = -log(max(output[sample*value_size + label], FLT_MIN));
    }
}

// cross-entropy loss function for multi-class classification
neuron_error CrossEntropyMulticlass::CalcLoss(neuro_u32 batch_size, neuro_u32 value_size, const neuro_float* output, const void* target)
{
    _NEURO_TENSOR_DATA gpu_loss_buffer(core::math_device_type::cuda, true);
    gpu_loss_buffer.Alloc(batch_size, 1);

    CrossEntropyMulticlassLoss << <CudaPlatform::GetCudaBlockCount(batch_size), CudaPlatform::threadsPerBlock >> > (batch_size, value_size, output, target, m_read_label_for_target, gpu_loss_buffer.GetBuffer());
    if(!CudaPlatform::CudaErrorCheck(hipPeekAtLastError()))
    {
        DEBUG_OUTPUT(L"failed CrossEntropyMulticlassCalcLoss");
        return -1;
    }

    neuro_float sum_loss;
    if (!sum(gpu_loss_buffer.GetSize(), gpu_loss_buffer.GetBuffer(), sum_loss))
    {
        DEBUG_OUTPUT(L"failed sum");
        return -1;
    }
    return sum_loss;
}

__global__ void CrossEntropyMulticlassDiff(neuro_u32 N, neuro_u32 value_size, const neuro_float* output, const void* target, bool is_label, neuro_float scale, neuro_float* diff)
{
    CUDA_KERNEL_LOOP(sample, N)
    {
        neuro_u32 label = 0;
        if (is_label)
        {
            label = ((neuro_u32*)target)[sample];
        }
        else
        {
            const neuro_float* target_p = (neuro_float*)target + sample*value_size;
            for (neuro_u32 t_index = 1; t_index < value_size; t_index++)
            {
                if (target_p[t_index]>target_p[label])
                    label = t_index;
            }
        }
        neuro_float prob = max(output[sample*value_size + label], FLT_MIN);
        diff[sample*value_size + label] = scale / prob;
    }
}

bool CrossEntropyMulticlass::CalcDiff(neuro_u32 batch_size, neuro_u32 value_size, const neuro_float* output, const void* target, neuro_float* diff)
{
    neuron_error scale = - normalize_factor(batch_size);

    CrossEntropyMulticlassDiff << <CudaPlatform::GetCudaBlockCount(batch_size), CudaPlatform::threadsPerBlock >> > (batch_size, value_size, output, target, m_read_label_for_target, scale, diff);
    return CudaPlatform::CudaErrorCheck(hipPeekAtLastError());
}
90cbef594fdead6d008a8cd4673c4d0b12361e6d.cu
#include "stdafx.h" #include "cuda_loss_function.h" #include "core/cuda_platform.h" using namespace np::engine::loss; using namespace np::engine::loss::cuda; using namespace np::core::cuda; using namespace std; LossFunction* LossFunction::CreateInstanceCUDA(core::cuda::CudaInstance* cuda_instance, _loss_type type, bool read_label_for_target) { if (cuda_instance == NULL) { DEBUG_OUTPUT(L"no cuda instance"); return NULL; } switch (type) { case _loss_type::CrossEntropy: return new cuda::CrossEntropy(cuda_instance, read_label_for_target); case _loss_type::CrossEntropyMulticlass: return new cuda::CrossEntropyMulticlass(cuda_instance, read_label_for_target); default: // case nsas::_loss_type::MSE: return new cuda::MSE(cuda_instance, read_label_for_target); } } CUDALossFunction::CUDALossFunction(core::cuda::CudaInstance* cuda_instance, bool read_label_for_target) : LossFunction(core::math_device_type::cuda, cuda_instance, read_label_for_target) { } CUDALossFunction::~CUDALossFunction() { } neuron_error CUDALossFunction::CalcLoss(neuro_u32 batch_size, neuro_u32 value_size, const neuro_float* output, const void* target) { _NEURO_TENSOR_DATA gpu_loss_buffer(core::math_device_type::cuda, true); gpu_loss_buffer.Alloc(batch_size, value_size); if (!CalcLossVector(batch_size, value_size, output, target, gpu_loss_buffer.GetBuffer())) { DEBUG_OUTPUT(L"failed CalcLossVector"); return -1; } neuro_float sum_loss; if (!sum(gpu_loss_buffer.GetSize(), gpu_loss_buffer.GetBuffer(), sum_loss)) { DEBUG_OUTPUT(L"failed sum"); return -1; } return sum_loss; } inline __device__ neuro_float GetTargetValueFromLabel(neuro_u32 value_size, neuro_u32 index, const neuro_u32* label_vector) { return label_vector[index / value_size] == (index % value_size) ? 1.f : 0.f; } __global__ void MSECalcLoss(neuro_u32 N, neuro_u32 value_size, const neuro_float* output, const void* target, const bool is_label, neuro_float* loss) { CUDA_KERNEL_LOOP(i, N) { const neuro_float target_value = is_label ? GetTargetValueFromLabel(value_size, i, (const neuro_u32*)target) : ((neuro_float*)target)[i]; loss[i] = (output[i] - target_value) * (output[i] - target_value) / 2; } } bool MSE::CalcLossVector(neuro_u32 batch_size, neuro_u32 value_size, const neuro_float* output, const void* target, neuro_float* loss) { const neuro_u32 N = batch_size * value_size; MSECalcLoss << <CudaPlatform::GetCudaBlockCount(N), CudaPlatform::threadsPerBlock >> > (N, value_size, output, target, m_read_label_for_target, loss); return CudaPlatform::CudaErrorCheck(cudaPeekAtLastError()); } __global__ void MSECalcDiff(neuro_u32 N, neuro_u32 value_size, const neuro_float* output, const void* target, const bool is_label, neuro_float scale, neuro_float* diff) { CUDA_KERNEL_LOOP(i, N) { const neuro_float target_value = is_label ? 
GetTargetValueFromLabel(value_size, i, (const neuro_u32*)target) : ((neuro_float*)target)[i];

        diff[i] = scale * (output[i] - target_value);
    }
}

bool MSE::CalcDiff(neuro_u32 batch_size, neuro_u32 value_size, const neuro_float* output, const void* target, neuro_float* diff)
{
    const neuron_error scale = normalize_factor(batch_size);

    const neuro_u32 N = batch_size * value_size;
    MSECalcDiff << <CudaPlatform::GetCudaBlockCount(N), CudaPlatform::threadsPerBlock >> > (N, value_size, output, target, m_read_label_for_target, scale, diff);

#if 0//defined(_DEBUG)
    void* temp=malloc(batch_size*value_size*4);
    core::CPU_MemoryManager cpu;
    DEBUG_OUTPUT(L"output");
    cpu.Memcpy(temp, output, batch_size * value_size * 4, core::math_device_type::cuda);
    NP_Util::DebugOutputValues((neuro_float*)temp, batch_size*value_size, 10);

    DEBUG_OUTPUT(L"diff");
    cpu.Memcpy(temp, diff, batch_size * value_size * 4, core::math_device_type::cuda);
    NP_Util::DebugOutputValues((neuro_float*)temp, batch_size*value_size, 10);

    DEBUG_OUTPUT(L"label");
    cpu.Memcpy(temp, target, batch_size * 4, core::math_device_type::cuda);
    NP_Util::DebugOutputValues((neuro_u32*)temp, batch_size, 10);
    free(temp);
#endif
    return CudaPlatform::CudaErrorCheck(cudaPeekAtLastError());
}

//#define _LOG_THRESHOLD 1e-20

// Hmm.. cross entropy still needs a closer look...
CrossEntropy::CrossEntropy(core::cuda::CudaInstance* cuda_instance, bool read_label_for_target)
    : CUDALossFunction(cuda_instance, read_label_for_target)
{
}

__global__ void CrossEntropyLoss(neuro_u32 N, neuro_u32 value_size, const neuro_float* output, const void* target, const bool is_label, neuro_float* loss)
{
    // use FLT_MIN because output values of 0 or 1 break the log terms
    CUDA_KERNEL_LOOP(i, N)
    {
        const neuro_float target_value = is_label ?
            GetTargetValueFromLabel(value_size, i, (const neuro_u32*)target) : ((neuro_float*)target)[i];

        loss[i] = -target_value * log(max(output[i], FLT_MIN))
            - (neuro_float(1) - target_value) * log(max(neuro_float(1) - output[i], FLT_MIN));
    }
}

bool CrossEntropy::CalcLossVector(neuro_u32 batch_size, neuro_u32 value_size, const neuro_float* output, const void* target, neuro_float* loss)
{
    CrossEntropyLoss << <CudaPlatform::GetCudaBlockCount(batch_size*value_size), CudaPlatform::threadsPerBlock >> > (batch_size*value_size, value_size, output, target, m_read_label_for_target, loss);
    return CudaPlatform::CudaErrorCheck(cudaPeekAtLastError());
}

__global__ void CrossEntropyDiff(neuro_u32 N, neuro_u32 value_size, const neuro_float* output, const void* target, const bool is_label, neuro_float scale, neuro_float* diff)
{
    CUDA_KERNEL_LOOP(i, N)
    {
        const neuro_float target_value = is_label ?
GetTargetValueFromLabel(value_size, i, (const neuro_u32*)target) : ((neuro_float*)target)[i];

        neuro_float prob = max(output[i], FLT_MIN);
        diff[i] = scale * target_value / prob;
    }
}

bool CrossEntropy::CalcDiff(neuro_u32 batch_size, neuro_u32 value_size, const neuro_float* output, const void* target, neuro_float* diff)
{
    const neuron_error scale = -normalize_factor(batch_size);

    const neuro_u32 N = batch_size * value_size;
    CrossEntropyDiff << <CudaPlatform::GetCudaBlockCount(N), CudaPlatform::threadsPerBlock >> > (N, value_size, output, target, m_read_label_for_target, scale, diff);
    return CudaPlatform::CudaErrorCheck(cudaPeekAtLastError());
}

CrossEntropyMulticlass::CrossEntropyMulticlass(core::cuda::CudaInstance* cuda_instance, bool read_label_for_target)
    : CUDALossFunction(cuda_instance, read_label_for_target)
{
}

__global__ void CrossEntropyMulticlassLoss(neuro_u32 N, neuro_u32 value_size, const neuro_float* output, const void* target, bool is_label, neuro_float* loss)
{
    // use FLT_MIN because an output of 0 breaks log()
    CUDA_KERNEL_LOOP(sample, N)
    {
        neuro_u32 label = 0;
        if (is_label)
        {
            label = ((neuro_u32*)target)[sample];
        }
        else
        {
            const neuro_float* target_p = (neuro_float*)target + sample*value_size;
            for (neuro_u32 t_index = 1; t_index < value_size; t_index++)
            {
                if (target_p[t_index]>target_p[label])
                    label = t_index;
            }
        }
        loss[sample] = -log(max(output[sample*value_size + label], FLT_MIN));
    }
}

// cross-entropy loss function for multi-class classification
neuron_error CrossEntropyMulticlass::CalcLoss(neuro_u32 batch_size, neuro_u32 value_size, const neuro_float* output, const void* target)
{
    _NEURO_TENSOR_DATA gpu_loss_buffer(core::math_device_type::cuda, true);
    gpu_loss_buffer.Alloc(batch_size, 1);

    CrossEntropyMulticlassLoss << <CudaPlatform::GetCudaBlockCount(batch_size), CudaPlatform::threadsPerBlock >> > (batch_size, value_size, output, target, m_read_label_for_target, gpu_loss_buffer.GetBuffer());
    if(!CudaPlatform::CudaErrorCheck(cudaPeekAtLastError()))
    {
        DEBUG_OUTPUT(L"failed CrossEntropyMulticlassCalcLoss");
        return -1;
    }

    neuro_float sum_loss;
    if (!sum(gpu_loss_buffer.GetSize(), gpu_loss_buffer.GetBuffer(), sum_loss))
    {
        DEBUG_OUTPUT(L"failed sum");
        return -1;
    }
    return sum_loss;
}

__global__ void CrossEntropyMulticlassDiff(neuro_u32 N, neuro_u32 value_size, const neuro_float* output, const void* target, bool is_label, neuro_float scale, neuro_float* diff)
{
    CUDA_KERNEL_LOOP(sample, N)
    {
        neuro_u32 label = 0;
        if (is_label)
        {
            label = ((neuro_u32*)target)[sample];
        }
        else
        {
            const neuro_float* target_p = (neuro_float*)target + sample*value_size;
            for (neuro_u32 t_index = 1; t_index < value_size; t_index++)
            {
                if (target_p[t_index]>target_p[label])
                    label = t_index;
            }
        }
        neuro_float prob = max(output[sample*value_size + label], FLT_MIN);
        diff[sample*value_size + label] = scale / prob;
    }
}

bool CrossEntropyMulticlass::CalcDiff(neuro_u32 batch_size, neuro_u32 value_size, const neuro_float* output, const void* target, neuro_float* diff)
{
    neuron_error scale = - normalize_factor(batch_size);

    CrossEntropyMulticlassDiff << <CudaPlatform::GetCudaBlockCount(batch_size), CudaPlatform::threadsPerBlock >> > (batch_size, value_size, output, target, m_read_label_for_target, scale, diff);
    return CudaPlatform::CudaErrorCheck(cudaPeekAtLastError());
}
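// Hedged host-side reference (hypothetical helper, not part of the library):
// mirrors CrossEntropyLoss above for the dense (non-label) target form, so the
// kernel can be spot-checked against a small batch copied back to the CPU.
#include <algorithm>
#include <cfloat>
#include <cmath>
static float cross_entropy_ref(const float* output, const float* target, int n)
{
    float loss = 0.f;
    for (int i = 0; i < n; ++i)
    {
        float p = std::max(output[i], FLT_MIN);        // same clamping as the kernel
        float q = std::max(1.f - output[i], FLT_MIN);
        loss += -target[i] * std::log(p) - (1.f - target[i]) * std::log(q);
    }
    return loss;
}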
db51d158b1359104f1d5859526213cb613aa66e0.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "computeScaledHeatmap.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *heatmap = NULL; hipMalloc(&heatmap, XSIZE*YSIZE); size_t heatmap_pitch = 2; int *scaled_heatmap = NULL; hipMalloc(&scaled_heatmap, XSIZE*YSIZE); size_t scaled_heatmap_pitch = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( computeScaledHeatmap), dim3(gridBlock),dim3(threadBlock), 0, 0, heatmap,heatmap_pitch,scaled_heatmap,scaled_heatmap_pitch); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( computeScaledHeatmap), dim3(gridBlock),dim3(threadBlock), 0, 0, heatmap,heatmap_pitch,scaled_heatmap,scaled_heatmap_pitch); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( computeScaledHeatmap), dim3(gridBlock),dim3(threadBlock), 0, 0, heatmap,heatmap_pitch,scaled_heatmap,scaled_heatmap_pitch); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
db51d158b1359104f1d5859526213cb613aa66e0.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "computeScaledHeatmap.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *heatmap = NULL; cudaMalloc(&heatmap, XSIZE*YSIZE); size_t heatmap_pitch = 2; int *scaled_heatmap = NULL; cudaMalloc(&scaled_heatmap, XSIZE*YSIZE); size_t scaled_heatmap_pitch = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); computeScaledHeatmap<<<gridBlock,threadBlock>>>(heatmap,heatmap_pitch,scaled_heatmap,scaled_heatmap_pitch); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { computeScaledHeatmap<<<gridBlock,threadBlock>>>(heatmap,heatmap_pitch,scaled_heatmap,scaled_heatmap_pitch); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { computeScaledHeatmap<<<gridBlock,threadBlock>>>(heatmap,heatmap_pitch,scaled_heatmap,scaled_heatmap_pitch); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
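// Hedged note (hypothetical helper, not part of the benchmark above): the
// steady_clock interval is taken without a trailing cudaDeviceSynchronize(),
// so it largely measures launch/enqueue cost rather than kernel execution.
// A sketch of the event-based pattern that times device-side work instead:
static float time_launches_ms(dim3 gridBlock, dim3 threadBlock, int *heatmap, size_t heatmap_pitch, int *scaled_heatmap, size_t scaled_heatmap_pitch, int iters) {
	cudaEvent_t t0, t1;
	cudaEventCreate(&t0); cudaEventCreate(&t1);
	cudaEventRecord(t0);
	for (int i = 0; i < iters; i++)
		computeScaledHeatmap<<<gridBlock,threadBlock>>>(heatmap,heatmap_pitch,scaled_heatmap,scaled_heatmap_pitch);
	cudaEventRecord(t1);
	cudaEventSynchronize(t1);            // wait for all queued kernels to finish
	float ms = 0.f;
	cudaEventElapsedTime(&ms, t0, t1);   // device-side elapsed time in milliseconds
	cudaEventDestroy(t0); cudaEventDestroy(t1);
	return ms;
}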
ab24b7dfa64d45c4036984dd32fe6c56d59c8d44.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <caffepro/layers/diag4d_operation_layer.h> #include <caffepro/proto/caffe.pb.h> #include <caffepro/utils/utils.h> namespace caffepro { diag4d_operation_layer::diag4d_operation_layer(caffepro_context *context, const LayerParameter &param) : caffepro_layer(context, param) { attr_.num_inputs_min = attr_.num_inputs_max = 1; attr_.num_outputs_min = attr_.num_outputs_max = 1; attr_.set_constraint( layer_attribute::CF_ALLOW_INPLACE | layer_attribute::CF_REQUIRE_NDIM_4 | layer_attribute::CF_REQUIRE_FIXEDLEN_DIM // remove it in the future ); } diag4d_operation_layer::~diag4d_operation_layer() { release_all(); } void diag4d_operation_layer::init() { check_input(); scale_ = layer_param_.diag_operation_param().scale(); shift_ = layer_param_.diag_operation_param().shift(); } __global__ static void diag4d_forward(const int count, const int channels, const int spatial_size, const int center_offset, const data_type scale, const data_type shift, const data_type *in, data_type *out) { CUDA_KERNEL_LOOP(index, count) { int hw = index % spatial_size; int c = index / spatial_size % channels; int n = index / spatial_size / channels; if (c == n && hw == center_offset) { out[index] = in[index] * scale + shift; } else { out[index] = in[index]; } } } void diag4d_operation_layer::on_forward(int device_index) { auto &input = *inputs_[0]->get(device_index); int count = input.count(); int center_offset = input.height() / 2 * input.width() + input.width() / 2; KERNEL_CALL(diag4d_forward, count)(count, input.channels(), input.height() * input.width(), center_offset, scale_, shift_, input.gpu_data(), outputs_[0]->get(device_index)->mutable_gpu_data()); } __global__ static void diag4d_backward(const int count, const int channels, const int spatial_size, const int center_offset, const data_type scale, const data_type *top_diff, data_type *bottom_diff, const data_type scale_targets) { CUDA_KERNEL_LOOP(index, count) { int hw = index % spatial_size; int c = index / spatial_size % channels; int n = index / spatial_size / channels; data_type v = top_diff[index]; if (c == n && hw == center_offset) { v *= scale; } if (scale_targets == 0) { bottom_diff[index] = v; } else { bottom_diff[index] = bottom_diff[index] * scale_targets + v; } } } void diag4d_operation_layer::on_backward(int device_index, act_selector bp_acts, weight_selector bp_weights, act_selector clear_acts_diff, weight_selector clear_weights_diff) { if (should_bp(bp_acts, 0)) { data_type beta = get_beta(clear_acts_diff, 0); auto &input = *inputs_[0]->get(device_index); int count = input.count(); int center_offset = input.height() / 2 * input.width() + input.width() / 2; KERNEL_CALL(diag4d_backward, count)(count, input.channels(), input.height() * input.width(), center_offset, scale_, outputs_[0]->get(device_index)->gpu_diff(), input.mutable_gpu_diff(), beta); } } }
ab24b7dfa64d45c4036984dd32fe6c56d59c8d44.cu
#include <caffepro/layers/diag4d_operation_layer.h> #include <caffepro/proto/caffe.pb.h> #include <caffepro/utils/utils.h> namespace caffepro { diag4d_operation_layer::diag4d_operation_layer(caffepro_context *context, const LayerParameter &param) : caffepro_layer(context, param) { attr_.num_inputs_min = attr_.num_inputs_max = 1; attr_.num_outputs_min = attr_.num_outputs_max = 1; attr_.set_constraint( layer_attribute::CF_ALLOW_INPLACE | layer_attribute::CF_REQUIRE_NDIM_4 | layer_attribute::CF_REQUIRE_FIXEDLEN_DIM // remove it in the future ); } diag4d_operation_layer::~diag4d_operation_layer() { release_all(); } void diag4d_operation_layer::init() { check_input(); scale_ = layer_param_.diag_operation_param().scale(); shift_ = layer_param_.diag_operation_param().shift(); } __global__ static void diag4d_forward(const int count, const int channels, const int spatial_size, const int center_offset, const data_type scale, const data_type shift, const data_type *in, data_type *out) { CUDA_KERNEL_LOOP(index, count) { int hw = index % spatial_size; int c = index / spatial_size % channels; int n = index / spatial_size / channels; if (c == n && hw == center_offset) { out[index] = in[index] * scale + shift; } else { out[index] = in[index]; } } } void diag4d_operation_layer::on_forward(int device_index) { auto &input = *inputs_[0]->get(device_index); int count = input.count(); int center_offset = input.height() / 2 * input.width() + input.width() / 2; KERNEL_CALL(diag4d_forward, count)(count, input.channels(), input.height() * input.width(), center_offset, scale_, shift_, input.gpu_data(), outputs_[0]->get(device_index)->mutable_gpu_data()); } __global__ static void diag4d_backward(const int count, const int channels, const int spatial_size, const int center_offset, const data_type scale, const data_type *top_diff, data_type *bottom_diff, const data_type scale_targets) { CUDA_KERNEL_LOOP(index, count) { int hw = index % spatial_size; int c = index / spatial_size % channels; int n = index / spatial_size / channels; data_type v = top_diff[index]; if (c == n && hw == center_offset) { v *= scale; } if (scale_targets == 0) { bottom_diff[index] = v; } else { bottom_diff[index] = bottom_diff[index] * scale_targets + v; } } } void diag4d_operation_layer::on_backward(int device_index, act_selector bp_acts, weight_selector bp_weights, act_selector clear_acts_diff, weight_selector clear_weights_diff) { if (should_bp(bp_acts, 0)) { data_type beta = get_beta(clear_acts_diff, 0); auto &input = *inputs_[0]->get(device_index); int count = input.count(); int center_offset = input.height() / 2 * input.width() + input.width() / 2; KERNEL_CALL(diag4d_backward, count)(count, input.channels(), input.height() * input.width(), center_offset, scale_, outputs_[0]->get(device_index)->gpu_diff(), input.mutable_gpu_diff(), beta); } } }
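// Hedged aside (assumption: the conventional Caffe-style definition, not
// confirmed by this file): the CUDA_KERNEL_LOOP macro used in the kernels
// above is usually the grid-stride loop
//   #define CUDA_KERNEL_LOOP(i, n)                                 \
//     for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
//          i += blockDim.x * gridDim.x)
// which lets a fixed-size grid cover an arbitrary element count.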
10ed28bc11ba93ef082b59589e71d66545d9b463.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/operators/one_hot_v2_op.h" #include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { using platform::PADDLE_CUDA_NUM_THREADS; template <typename InT, typename OutT> __global__ void FillOutputKernel(const InT* p_in_data, OutT* p_out_data, const int64_t numel, const int depth) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < numel && p_in_data[idx] >= 0 && p_in_data[idx] < depth) { *(p_out_data + (idx * depth) + p_in_data[idx]) = 1.0; } } template <typename DeviceContext, typename InT> struct OneHotV2OpCUDAFunctor { const framework::LoDTensor* in_; framework::LoDTensor* out_; const DeviceContext& ctx_; int depth_; OneHotV2OpCUDAFunctor(const framework::LoDTensor* in, framework::LoDTensor* out, int depth, const DeviceContext& ctx) : in_(in), out_(out), depth_(depth), ctx_(ctx) {} template <typename OutT> void apply() const { auto* p_in_data = in_->data<InT>(); auto numel = in_->numel(); auto* p_out_data = out_->mutable_data<OutT>(ctx_.GetPlace()); auto stream = ctx_.stream(); math::set_constant(ctx_, out_, 0.0); hipLaunchKernelGGL(( FillOutputKernel), (numel + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS, dim3(PADDLE_CUDA_NUM_THREADS), 0, stream, p_in_data, p_out_data, numel, depth_); } }; using LoDTensor = framework::LoDTensor; template <typename DeviceContext, typename T> class OneHotV2CUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* in = context.Input<LoDTensor>("X"); auto* out = context.Output<LoDTensor>("Out"); int depth = -1; if (context.HasInput("depth_tensor")) { auto* depth_tensor = context.Input<framework::Tensor>("depth_tensor"); if (platform::is_gpu_place(depth_tensor->place())) { framework::Tensor temp; TensorCopySync(*depth_tensor, platform::CPUPlace(), &temp); depth = *temp.data<int32_t>(); } else { depth = *depth_tensor->data<int32_t>(); } auto out_dims = out->dims(); out_dims[out_dims.size() - 1] = depth; out->Resize(out_dims); } else { depth = context.Attr<int>("depth"); } framework::VisitDataType( static_cast<framework::proto::VarType::Type>( context.Attr<int>("dtype")), OneHotV2OpCUDAFunctor<DeviceContext, T>( in, out, depth, context.template device_context<DeviceContext>())); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( one_hot_v2, ops::OneHotV2CUDAKernel<paddle::platform::CUDADeviceContext, int>, ops::OneHotV2CUDAKernel<paddle::platform::CUDADeviceContext, int64_t>);
10ed28bc11ba93ef082b59589e71d66545d9b463.cu
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/operators/one_hot_v2_op.h" #include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { using platform::PADDLE_CUDA_NUM_THREADS; template <typename InT, typename OutT> __global__ void FillOutputKernel(const InT* p_in_data, OutT* p_out_data, const int64_t numel, const int depth) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < numel && p_in_data[idx] >= 0 && p_in_data[idx] < depth) { *(p_out_data + (idx * depth) + p_in_data[idx]) = 1.0; } } template <typename DeviceContext, typename InT> struct OneHotV2OpCUDAFunctor { const framework::LoDTensor* in_; framework::LoDTensor* out_; const DeviceContext& ctx_; int depth_; OneHotV2OpCUDAFunctor(const framework::LoDTensor* in, framework::LoDTensor* out, int depth, const DeviceContext& ctx) : in_(in), out_(out), depth_(depth), ctx_(ctx) {} template <typename OutT> void apply() const { auto* p_in_data = in_->data<InT>(); auto numel = in_->numel(); auto* p_out_data = out_->mutable_data<OutT>(ctx_.GetPlace()); auto stream = ctx_.stream(); math::set_constant(ctx_, out_, 0.0); FillOutputKernel<<<(numel + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS, PADDLE_CUDA_NUM_THREADS, 0, stream>>>( p_in_data, p_out_data, numel, depth_); } }; using LoDTensor = framework::LoDTensor; template <typename DeviceContext, typename T> class OneHotV2CUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* in = context.Input<LoDTensor>("X"); auto* out = context.Output<LoDTensor>("Out"); int depth = -1; if (context.HasInput("depth_tensor")) { auto* depth_tensor = context.Input<framework::Tensor>("depth_tensor"); if (platform::is_gpu_place(depth_tensor->place())) { framework::Tensor temp; TensorCopySync(*depth_tensor, platform::CPUPlace(), &temp); depth = *temp.data<int32_t>(); } else { depth = *depth_tensor->data<int32_t>(); } auto out_dims = out->dims(); out_dims[out_dims.size() - 1] = depth; out->Resize(out_dims); } else { depth = context.Attr<int>("depth"); } framework::VisitDataType( static_cast<framework::proto::VarType::Type>( context.Attr<int>("dtype")), OneHotV2OpCUDAFunctor<DeviceContext, T>( in, out, depth, context.template device_context<DeviceContext>())); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( one_hot_v2, ops::OneHotV2CUDAKernel<paddle::platform::CUDADeviceContext, int>, ops::OneHotV2CUDAKernel<paddle::platform::CUDADeviceContext, int64_t>);
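// Hedged CPU reference (hypothetical helper, not part of the operator above)
// mirroring FillOutputKernel: the output is zero-filled first (set_constant in
// apply()), then each in-range label writes a single 1.0 into its row, so an
// out-of-range label simply leaves an all-zero row.
static void one_hot_ref(const int64_t* in, float* out, int64_t numel, int depth) {
  for (int64_t i = 0; i < numel; ++i) {
    if (in[i] >= 0 && in[i] < depth) {
      out[i * depth + in[i]] = 1.0f;
    }
  }
}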
3aac4c6ef66bfc45c7f2c068261ece83b39a07dc.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>

#define N 1024

//making my own strcpy and strcat because screw cuda, not giving access to libraries :(
__device__ char* nStrCpy(char *dest, const char *src)
{
	int i =0;
	do
	{
		dest[i] = src[i];
	} while (src[i++] != 0);
	return dest;
}

__device__ char* nStrcat(char *dest, const char *src){
	int i =0;
	while (dest[i] != 0)
		i++;
	nStrCpy(dest+i, src);
	return dest;
}

//this makes a single password, recursively adding 2 characters to password every time and removing one from site
__device__ void makePassword(char *square, char* site, int position, int direction, int size, char* password)
{
	//x position and y position within square as square is a linear array
	int x = position%size;
	int y = position/size;

	int firstCharP = 0;
	int secCharP = 0;

	//if direction is vertical
	if (direction ==0)
	{
		//check every character in the current vertical line
		for (int i =0; i < size; i++)
		{
			//position of new character
			int newPosition = (i*size) + x;
			//found a match
			if (site[0] == square[newPosition])
			{
				//goes up
				if (newPosition < position)
				{
					//first character for password
					firstCharP = newPosition - size;
					//if below first line go to bottom
					if(firstCharP < 0)
						firstCharP += (size * size);
					//second character for password
					secCharP = firstCharP - size;
					if(secCharP < 0)
						secCharP += (size*size);
				//goes down
				}
				else
				{
					firstCharP = newPosition + size;
					// if below last line, loop to top
					if (firstCharP >= (size*size))
						firstCharP -= (size*size);
					secCharP = firstCharP + size;
					if(secCharP >= (size*size))
						secCharP -= (size*size);
				}
			}
		}
		//switch to horizontal direction for next 2 characters
		direction = 1;
	//if direction is horizontal
	}
	else
	{
		for (int i =0; i < size; i++)
		{
			int newPosition = (y*size)+i;
			if (site[0] == square[newPosition])
			{
				//new position to the left of previous, should never be the same
				if (newPosition < position)
				{
					firstCharP = newPosition -1;
					//if previous row, wrap around to right side instead
					if ((firstCharP/size) < y || firstCharP == -1)
						firstCharP += size;
					secCharP = firstCharP -1;
					if ((secCharP/size) < y || secCharP == -1)
						secCharP += size;
				//new position to right of previous
				}
				else
				{
					//if on next row wrap to front
					firstCharP = newPosition +1;
					if ((firstCharP/size) > y)
						firstCharP -= size;
					secCharP = firstCharP +1;
					if ((secCharP/size) > y)
						secCharP -= size;
				}
			}
		}
		//switch to vertical direction for next couple of characters
		direction = 0;
	}
	//go to next character in site name
	site++;

	//if more of the password is needed
	if (site[0] != '\n')
	{
		//set the next couple of characters
		password[0] = square[firstCharP];
		password[1] = square[secCharP];
		//increase pointer to start the next part of password without overwriting previous characters
		password++;
		password++;
		//more parts of the password!
		makePassword(square, site, secCharP, direction, size, password);
	}
	else
	{
		//set the last two characters of the password.
		password[0] = square[firstCharP];
		password[1] = square[secCharP];
	}
}

//get the starting position of the password within the grid, i.e.
start at top row, and travel through the domain name
__device__ int getStartPosition(char *square, char *site, int size)
{
	int position =0;
	//find the starting position within the first row
	for (int i =0; i < size; i++)
	{
		if (square[i] == site[0])
			position = i;
	}
	//direction 0 is going down, as it starts
	int direction = 0;

	//doing 6 characters only, because apparently I hate making modularized code the first time
	for (int i =1; i < 6; i++)
	{
		//x and y position within a linear array
		int x = position%size;
		int y = position/size;

		//check all characters in row/columns
		for (int j = 0; j < size; j++)
		{
			//vertical directions
			if (direction ==0)
			{
				//it found the next character!
				if (site[i] == square[(j * size) + x ])
				{
					position = (j * size) +x;
					direction = 1;
					break;
				}
			//horizontal direction
			}
			else
			{
				//it found the next character!
				if (site[i] == square[(y * size) + j])
				{
					position = (y* size) + j;
					direction = 0;
					break;
				}
			}
		}
	}
	//return the starting position... because that's the point of this function ... dur
	return position;
}

//make a random password
__global__ void randomWords(char *square, char *passwords, int size, int *c, int amount)
{
	//that id though
	int tid = blockIdx.x*blockDim.x+threadIdx.x;

	//cuda random initializers
	hiprandState_t state;
	hiprand_init(tid, 1, 2, &state);

	//make a certain number of passwords per core
	for (int a = 0; a < amount; a++)
	{
		//starting position for this password
		int tidNum = ((tid * amount) + a) *24;
		passwords[(tidNum)] = square[(hiprand(&state) % size)];

		//7 characters for the site, 6 and a \n
		char site[7];
		site[0] = passwords[tidNum];
		site[6] = '\n';

		//make 6 random characters
		for (int i=1; i < 6; i++)
		{
			//make sure 2 characters do not repeat
			do
			{
				passwords[i + (tidNum)] = square[(hiprand(&state) % size)];
			} while (passwords[(i-1) +(tidNum)] == passwords[i + (tidNum)]);
			//set random character
			site[i] = passwords[i + (tidNum)];
		}
		// add that ' -> ' Miller wanted
		passwords[7 + (tidNum)] = ' ';
		passwords[8 + (tidNum)] = '-';
		passwords[9 + (tidNum)] = '>';
		passwords[10 + (tidNum)] = ' ';

		//lets get that starting position
		int position = getStartPosition(square, site, size);
		//stored the starting position within c for debugging purposes
		//I left this in here because it could be useful if I ever come back to this project
		c[(tid * amount)+ a] = position;

		//create the password object
		char *password;
		password = (char *)malloc(sizeof(char) *13);
		//generate that password finally
		makePassword(square, site, position, 1, size, password);

		//save the password in the passwords array that the main program can access
		for(int i = 0; i < 12; i++)
		{
			passwords[11 + i + (tidNum)] = password[i];
		}
		//release the device-side scratch buffer so the heap is not exhausted
		free(password);
	}
}

int main( void )
{
	//used to organize cores on cuda
	dim3 gridsize, blocksize;

	//get size of the grid
	int size = 10;
	//printf("Please input a size of grid to be tested (integer number only): ");
	//scanf("%d", &size);

	//I want at least total passwords (I used 12,000 because when creating 10,000 passwords
	//out of a possible 100,000 there are bound to be some repeats
	int total = 12000;
	int amount = total / N;
	amount++;
	total = amount * N;

	//get the file to be read
	char file[] = "grid.txt";
	//printf("Insert a file containing your latin square: ");
	//scanf("%s", file);

	//allocate memory for the grid
	char grid[size][size];
	char *square;
	hipMallocManaged((void**)&square, size * size * sizeof(char));

	//allocate memory for the passwords
	char *passwords;
	printf("total: %d\n", total);
	hipMallocManaged((void**)&passwords, sizeof(char) * 24 * total);

	//open grid file to read grid
	FILE *file1 = fopen(file, "r");

	//copy each character from grid file to grid object
	for (int i=0; i < size; i++)
	{
		for (int j=0; j < size; j++)
		{
			fscanf(file1, "%c\n", &grid[i][j]);
		}
	}

	//transfer the grid to a linear array
	for(int i=0; i < size;i++)
	{
		for(int j=0; j < size; j++)
		{
			square[size * i + j] = grid[i][j];
		}
	}

	//close the grid file
	fclose(file1);

	// allocate the memory on the GPU, this was used for saving the starting positions of each password
	int *c;
	hipMallocManaged( (void**)&c, N * amount * sizeof(int));

	//I randomly chose 16 as the block size, it seems like a good number
	blocksize.x = 16;
	gridsize.x = N/blocksize.x;

	//this activates some cool cuda stuff
	hipLaunchKernelGGL((randomWords), dim3(gridsize.x), dim3(blocksize.x), 0, 0, square, passwords, size, c, amount);
	hipDeviceSynchronize();

	//output file brah
	FILE * f = fopen("/home/student/stahlysr/output.txt", "w");

	//lets make sure that file exists brah
	if (f == NULL)
	{
		printf("error opening output.txt\n");
		exit(1);
	}

	//copy the passwords to the file one character at a time. oh yeah the efficiency broseph
	for (int i=0; i<total; i++)
	{
		for (int j=0; j<23;j++)
		{
			fprintf(f, "%c", passwords[j + (i * 24)]);
		}
		fprintf(f, "\n");
	}

	// free the memory allocated on the GPU, close the file and you are done Tyranbrosaurus Rex!
	fclose(f);
	hipFree( c );
	hipFree( square );
	hipFree( passwords );

	return 0;
}
3aac4c6ef66bfc45c7f2c068261ece83b39a07dc.cu
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>

#define N 1024

//making my own strcpy and strcat because screw cuda, not giving access to libraries :(
__device__ char* nStrCpy(char *dest, const char *src)
{
	int i =0;
	do
	{
		dest[i] = src[i];
	} while (src[i++] != 0);
	return dest;
}

__device__ char* nStrcat(char *dest, const char *src){
	int i =0;
	while (dest[i] != 0)
		i++;
	nStrCpy(dest+i, src);
	return dest;
}

//this makes a single password, recursively adding 2 characters to password every time and removing one from site
__device__ void makePassword(char *square, char* site, int position, int direction, int size, char* password)
{
	//x position and y position within square as square is a linear array
	int x = position%size;
	int y = position/size;

	int firstCharP = 0;
	int secCharP = 0;

	//if direction is vertical
	if (direction ==0)
	{
		//check every character in the current vertical line
		for (int i =0; i < size; i++)
		{
			//position of new character
			int newPosition = (i*size) + x;
			//found a match
			if (site[0] == square[newPosition])
			{
				//goes up
				if (newPosition < position)
				{
					//first character for password
					firstCharP = newPosition - size;
					//if below first line go to bottom
					if(firstCharP < 0)
						firstCharP += (size * size);
					//second character for password
					secCharP = firstCharP - size;
					if(secCharP < 0)
						secCharP += (size*size);
				//goes down
				}
				else
				{
					firstCharP = newPosition + size;
					// if below last line, loop to top
					if (firstCharP >= (size*size))
						firstCharP -= (size*size);
					secCharP = firstCharP + size;
					if(secCharP >= (size*size))
						secCharP -= (size*size);
				}
			}
		}
		//switch to horizontal direction for next 2 characters
		direction = 1;
	//if direction is horizontal
	}
	else
	{
		for (int i =0; i < size; i++)
		{
			int newPosition = (y*size)+i;
			if (site[0] == square[newPosition])
			{
				//new position to the left of previous, should never be the same
				if (newPosition < position)
				{
					firstCharP = newPosition -1;
					//if previous row, wrap around to right side instead
					if ((firstCharP/size) < y || firstCharP == -1)
						firstCharP += size;
					secCharP = firstCharP -1;
					if ((secCharP/size) < y || secCharP == -1)
						secCharP += size;
				//new position to right of previous
				}
				else
				{
					//if on next row wrap to front
					firstCharP = newPosition +1;
					if ((firstCharP/size) > y)
						firstCharP -= size;
					secCharP = firstCharP +1;
					if ((secCharP/size) > y)
						secCharP -= size;
				}
			}
		}
		//switch to vertical direction for next couple of characters
		direction = 0;
	}
	//go to next character in site name
	site++;

	//if more of the password is needed
	if (site[0] != '\n')
	{
		//set the next couple of characters
		password[0] = square[firstCharP];
		password[1] = square[secCharP];
		//increase pointer to start the next part of password without overwriting previous characters
		password++;
		password++;
		//more parts of the password!
		makePassword(square, site, secCharP, direction, size, password);
	}
	else
	{
		//set the last two characters of the password.
		password[0] = square[firstCharP];
		password[1] = square[secCharP];
	}
}

//get the starting position of the password within the grid, i.e.
start at top row, and travel through the domain name __device__ int getStartPosition(char *square, char *site, int size) { int position =0; //find the atarting position within the first row for (int i =0; i < size; i++) { if (square[i] == site[0]) position = i; } //direction 0 is going down, as it starts int direction = 0; //doing 6 characters only, because apparently I hate make modularized code the first time for (int i =1; i < 6; i++) { //x and y position within a linear array int x = position%size; int y = position/size; //check all characters in row/colums for (int j = 0; j < size; j++) { //vertical directions if (direction ==0) { //it found the next character! if (site[i] == square[(j * size) + x ]) { position = (j * size) +x; direction = 1; break; } //horizontal direction } else { //it found the nest character! if (site[i] == square[(y * size) + j]) { position = (y* size) + j; direction = 0; break; } } } } //return the starting poistion... because that's the point of this function ... dur return position; } //make a random password __global__ void randomWords(char *square, char *passwords, int size, int *c, int amount) { //that id though int tid = blockIdx.x*blockDim.x+threadIdx.x; //cuda random intitalizers curandState_t state; curand_init(tid, 1, 2, &state); //make a certain number of passwords per core for (int a = 0; a < amount; a++) { //starting position for this password int tidNum = ((tid * amount) + a) *24; passwords[(tidNum)] = square[(curand(&state) % size)]; //7 characters for the site, 6 and a \n char site[7]; site[0] = passwords[tidNum]; site[6] = '\n'; //make 6 random characters for (int i=1; i < 6; i++) { //make sure 2 characters do not repeat do { passwords[i + (tidNum)] = square[(curand(&state) % size)]; } while (passwords[(i-1) +(tidNum)] == passwords[i + (tidNum)]); //set random character site[i] = passwords[i + (tidNum)]; } // add that ' -> ' Miller wanted passwords[7 + (tidNum)] = ' '; passwords[8 + (tidNum)] = '-'; passwords[9 + (tidNum)] = '>'; passwords[10 + (tidNum)] = ' '; //lets get that starting position int position = getStartPosition(square, site, size); //stored the startingposition within c for debuggin puroposes //I left this in here becuase it could be useful if I ever come back to this project c[(tid * amount)+ a] = position; //create the password object char *password; password = (char *)malloc(sizeof(char) *13); //generate that password finally makePassword(square, site, position, 1, size, password); //save the password in the passwords array that the main program can access for(int i = 0; i < 12; i++) { passwords[11 + i + (tidNum)] = password[i]; } } } int main( void ) { //used to organize cores on cuda dim3 gridsize, blocksize; //get size of the grid int size = 10; dim3 gridsize, blocksize; //printf("Please input a size of grid to be tested (integer number only): "); //scanf("%d", &size); //I want at least total passwords (I used 12,000 because when creating 10,000 passwords //out of a possible 100,000 there are bound to be some repeats int total = 12000; int amount = total / N; amount++; total = amount * N; //get the file to be read char file[] = "grid.txt"; //printf("Insert a file containing your latin square: "); //scanf("%s", file); //allocate memory for the grid char grid[size][size]; char *square; cudaMallocManaged((void**)&square, size * size * sizeof(char)); //allocate memory for the grid char *passwords; printf("total: %d\n", total); cudaMallocManaged((void**)&passwords, sizeof(char) * 24 * total); //open grid file to read grid FILE 
*file1 = fopen(file, "r"); //copy each character from grid file to grid object for (int i=0; i < size; i++) { for (int j=0; j < size; j++) { fscanf(file1, "%c\n", &grid[i][j]); } } //transfer the grid to a linear array for(int i=0; i < size;i++) { for(int j=0; j < size; j++) { square[size * i + j] = grid[i][j]; } } //close the grid file fclose(file1); // allocate the memory on the GPU, this was used for saving the starting positions of each password int *c; cudaMallocManaged( (void**)&c, N * amount * sizeof(int)); //I randomly chose 16 as the block size, it seems like a good number blocksize.x = 16; gridsize.x = N/blocksize.x; //this activates some cool cuda stuff randomWords<<<gridsize.x, blocksize.x>>>(square, passwords, size, c, amount); cudaDeviceSynchronize(); //outpt file brah FILE * f = fopen("/home/student/stahlysr/output.txt", "w"); //lets make sure that file exists brh if (f == NULL) { printf("error opening output.txt\n"); exit(1); } //copy the passwords to the file one character at a time. oh yeah the effeciency broseph for (int i=0; i<total; i++) { char * output = (char *)malloc(sizeof(char) * 24); for (int j=0; j<23;j++) { fprintf(f, "%c", passwords[j + (i * 24)]); } fprintf(f, "\n"); } // free the memory allocated on the GPU, close the file and you are done Tyranbrosaurus Rex! fclose(f); cudaFree( c ); cudaFree( square ); cudaFree( passwords ); return 0; }
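/*
 * Neither version of the password generator above checks the result of
 * cudaMallocManaged (hipMallocManaged) or of the kernel launch. Below is a
 * minimal, hedged sketch of one common way to surface such failures; it is
 * not part of the original program, and the CUDA_CHECK macro name and noop
 * kernel are my own. Only documented runtime calls are used.
 */
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

//wrap every runtime call; print and abort on the first failure
#define CUDA_CHECK(call)                                               \
    do {                                                               \
        cudaError_t err_ = (call);                                     \
        if (err_ != cudaSuccess) {                                     \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                \
                    cudaGetErrorString(err_), __FILE__, __LINE__);     \
            exit(EXIT_FAILURE);                                        \
        }                                                              \
    } while (0)

__global__ void noop(void) {}

int main(void) {
    char *square = NULL;
    //allocation errors are returned directly
    CUDA_CHECK(cudaMallocManaged((void**)&square, 100 * sizeof(char)));

    noop<<<1, 16>>>();
    //launch errors only become visible through cudaGetLastError
    //or the next synchronizing call
    CUDA_CHECK(cudaGetLastError());
    CUDA_CHECK(cudaDeviceSynchronize());

    CUDA_CHECK(cudaFree(square));
    return 0;
}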
7681585de8d1bf6fb28e94db785d57399cb2f85b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <time.h> /* square root of number of threads in a block (the number of threads in a block is NT^2) */ #define NT 32 /* length of the target domain */ #define L 10.0 /* number of division for the discretization of the target domain */ #define N 256 /* dimensionless time step size (theta = D * dt / dx^2) */ #define THETA 0.1 /* number of iterations */ #define M 2000 /* constants on a GPU */ __device__ __constant__ int n; __device__ __constant__ double theta; //GPU functions----------------------------------------------------------------- __global__ void diffusion_global(double *field_device, double *field_device_new) { int i_global; int j_global; int i_left, i_right; int j_top, j_bottom; i_global = blockDim.x * blockIdx.x + threadIdx.x; if(i_global < n) { i_right = (i_global + 1) % n; i_left = (i_global - 1 + n) % n; for(j_global = threadIdx.y; j_global < n; j_global += NT) { j_top = (j_global + 1) % n; j_bottom = (j_global - 1 + n) % n; field_device_new[i_global * n + j_global] = field_device[i_global * n + j_global] + theta * (field_device[i_right * n + j_global] + field_device[i_left * n + j_global] + field_device[i_global * n + j_top] + field_device[i_global * n + j_bottom] - 4.0 * field_device[i_global * n + j_global]); } } } __global__ void diffusion_shared(double *field_device, double *field_device_new) { int i_global; int j_global; int i_shared; int j_shared; int i_left, i_right; int j_top, j_bottom; double field_register; __shared__ double field_shared[(NT + 2) * (NT + 2)]; i_global = blockDim.x * blockIdx.x + threadIdx.x; i_shared = threadIdx.x + 1; j_shared = threadIdx.y + 1; if(i_global < n) { for(j_global = threadIdx.y; j_global < n; j_global += NT) { //copy field from global to shared---------------------- field_register = field_device[i_global * n + j_global]; field_shared[i_shared * (NT + 2) + j_shared] = field_register; if(i_shared == 1) { i_left = (i_global - 1 + n) % n; field_shared[0 * (NT + 2) + j_shared] = field_device[i_left * n + j_global]; } else if(i_shared == NT) { i_right = (i_global + 1) % n; field_shared[(NT + 1) * (NT + 2) + j_shared] = field_device[i_right * n + j_global]; } if(j_shared == 1) { j_bottom = (j_global - 1 + n) % n; field_shared[i_shared * (NT + 2) + 0] = field_device[i_global * n + j_bottom]; } else if(j_shared == NT) { j_top = (j_global + 1) % n; field_shared[i_shared * (NT + 2) + (NT + 1)] = field_device[i_global * n + j_top]; } __syncthreads(); //calculate field evolution----------------------------- field_device_new[i_global * n + j_global] = field_register + theta * (field_shared[(i_shared + 1) * (NT + 2) + j_shared] + field_shared[(i_shared - 1) * (NT + 2) + j_shared] + field_shared[i_shared * (NT + 2) + (j_shared + 1)] + field_shared[i_shared * (NT + 2) + (j_shared - 1)] - 4.0 * field_register); } } } //Host functions---------------------------------------------------------------- void init_field(double *field_host, int n_host, int l_host) { int i; int j; double x; double y; double dx = l_host / (double)n_host; double dy = l_host / (double)n_host; double midst = l_host * 0.5; for(i = 0; i < n_host; i += 1) { x = (double)i * dx; for(j = 0; j < n_host; j += 1) { y = (double)j * dy; if((x > midst && y > midst) || (x < midst && y < midst)) { field_host[n_host * j + i] = 1.0; } else { field_host[n_host * j + i] = 0.0; } } } } void flip_ij(int *i, int *j) { int i_temp; i_temp = *i; *i = *j; *j = i_temp; } void 
print_field(FILE *file_write, double *field, int n, double l) {
    int i;
    int j;
    double x;
    double y;
    double d = l / (double)n;

    for (i = 0; i < n; i += 1) {
        x = (double)i * d;
        for (j = 0; j < n; j += 1) {
            y = (double)j * d;
            fprintf(file_write, "%f %f %f\n", x, y, field[i * n + j]);
        }
    }
}

void diffusion_host(double *field_host, double *field_host_new, int n_host, double theta_host) {
    int i;
    int j;
    int i_right, i_left;
    int j_top, j_bottom;

    for (i = 0; i < n_host; i += 1) {
        i_right = (i + 1) % n_host;
        i_left = (i - 1 + n_host) % n_host;
        for (j = 0; j < n_host; j += 1) {
            j_top = (j + 1) % n_host;
            j_bottom = (j - 1 + n_host) % n_host;
            field_host_new[i * n_host + j] = field_host[i * n_host + j]
                + theta_host * (field_host[i_right * n_host + j] + field_host[i_left * n_host + j]
                + field_host[i * n_host + j_top] + field_host[i * n_host + j_bottom]
                - 4.0 * field_host[i * n_host + j]);
        }
    }
}

double check_residue(double *field_host, double *field_device, int n_host) {
    int i;
    double residue = 0.0;

    for (i = 0; i < n_host * n_host; i += 1) {
        residue += (field_host[i] - field_device[i]) * (field_host[i] - field_device[i]);
    }
    return residue;
}

int main(void) {
    //declare variables--------------------------------------------------------------
    int i;
    int j;
    int k;
    int n_host;
    int n_square;
    int iteration;
    int n_blocks;
    double l_host;
    double theta_host;
    dim3 dim_threads;
    double *field_host[2];
    double *field_device[2];
    double *result_host;
    double *result_global_host;
    double *result_shared_host;
    FILE *file_write;
    char filename_write[256];
    clock_t start, end;

    //initialize---------------------------------------------------------------------
    //set variables----------------------------------------------------------
    n_host = N;
    n_square = N * N;
    l_host = L;
    theta_host = THETA;
    dim_threads.x = NT;
    dim_threads.y = NT;
    dim_threads.z = 1;
    n_blocks = (int)(ceil((double)n_host / NT));
    iteration = M;

    //allocate memories--------------------------------------------------------
    hipMemcpyToSymbol(n, &n_host, sizeof(int), 0, hipMemcpyHostToDevice);
    hipMemcpyToSymbol(theta, &theta_host, sizeof(double), 0, hipMemcpyHostToDevice);
    hipHostMalloc((void **)&field_host[0], n_square * sizeof(double), hipHostMallocMapped);
    hipHostMalloc((void **)&field_host[1], n_square * sizeof(double), hipHostMallocMapped);
    hipHostMalloc((void **)&result_global_host, n_square * sizeof(double), hipHostMallocMapped);
    hipHostMalloc((void **)&result_shared_host, n_square * sizeof(double), hipHostMallocMapped);
    hipMalloc((void **)&field_device[0], n_square * sizeof(double));
    hipMalloc((void **)&field_device[1], n_square * sizeof(double));
    result_host = (double *)malloc(n_square * sizeof(double));

    //calculate on CPU-----------------------------------------------------------------
    //initialize field----------------------------------------------------------
    init_field(field_host[0], n_host, l_host);
    start = clock();

    //iteration-----------------------------------------------------------------
    i = 0;
    j = 1;
    for (k = 0; k < iteration; k += 1) {
        diffusion_host(field_host[i], field_host[j], n_host, theta_host);
        flip_ij(&i, &j);
    }

    //save and print out--------------------------------------------------------
    memcpy(result_host, field_host[i], n_square * sizeof(double));
    end = clock();
    printf("host:%ld\n", end - start);
    /*sprintf(filename_write, "result_host.txt");
    file_write = fopen(filename_write, "w");
    print_field(file_write, result_host, n_host, l_host);
    fclose(file_write);*/

    //calculate using only global memory----------------------------------------------
    //initialize
field------------------------------------------------------ init_field(field_host[0], n_host, l_host); start = clock(); hipMemcpy(field_device[0], field_host[0], n_square * sizeof(double), hipMemcpyHostToDevice); //iteration------------------------------------------------------------- i = 0; j = 1; for(k = 0; k < iteration; k += 1) { hipLaunchKernelGGL(( diffusion_global), dim3(n_blocks), dim3(dim_threads), 0, 0, field_device[i], field_device[j]); hipDeviceSynchronize(); flip_ij(&i, &j); } //copy to host and print out-------------------------------------------- hipMemcpy(result_global_host, field_device[i], n_square * sizeof(double), hipMemcpyDeviceToHost); end = clock(); printf("global:%ld\n", end - start); /*sprintf(filename_write, "result_global.txt"); file_write = fopen(filename_write, "w"); print_field(file_write, result_global_host, n_host, l_host); fclose(file_write);*/ //calculate using shared memory------------------------------------------------- //initialize field------------------------------------------------------ init_field(field_host[0], n_host, l_host); start = clock(); hipMemcpy(field_device[0], field_host[0], n_square * sizeof(double), hipMemcpyHostToDevice); //iteration------------------------------------------------------------- i = 0; j = 1; for(k = 0; k < iteration; k += 1) { hipLaunchKernelGGL(( diffusion_shared), dim3(n_blocks), dim3(dim_threads), 0, 0, field_device[i], field_device[j]); hipDeviceSynchronize(); flip_ij(&i, &j); } //copy to host and print out-------------------------------------------- hipMemcpy(result_shared_host, field_device[i], n_square * sizeof(double), hipMemcpyDeviceToHost); end = clock(); printf("shared:%ld\n", end - start); /*sprintf(filename_write, "result_shared.txt"); file_write = fopen(filename_write, "w"); print_field(file_write, result_shared_host, n_host, l_host); fclose(file_write);*/ //check answers----------------------------------------------------------------- printf("answers\n"); printf("global:%f\n", check_residue(result_host, result_global_host, n_host)); printf("shared:%f\n", check_residue(result_host, result_shared_host, n_host)); //finalize---------------------------------------------------------------------- hipHostFree(field_host[0]); hipHostFree(field_host[1]); hipHostFree(result_global_host); hipHostFree(result_shared_host); hipFree(field_device[0]); hipFree(field_device[1]); free(result_host); return 0; }
7681585de8d1bf6fb28e94db785d57399cb2f85b.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> /* square root of number of threads in a block (the number of threads in a block is NT^2) */ #define NT 32 /* length of the target domain */ #define L 10.0 /* number of division for the discretization of the target domain */ #define N 256 /* dimensionless time step size (theta = D * dt / dx^2) */ #define THETA 0.1 /* number of iterations */ #define M 2000 /* constants on a GPU */ __device__ __constant__ int n; __device__ __constant__ double theta; //GPU functions----------------------------------------------------------------- __global__ void diffusion_global(double *field_device, double *field_device_new) { int i_global; int j_global; int i_left, i_right; int j_top, j_bottom; i_global = blockDim.x * blockIdx.x + threadIdx.x; if(i_global < n) { i_right = (i_global + 1) % n; i_left = (i_global - 1 + n) % n; for(j_global = threadIdx.y; j_global < n; j_global += NT) { j_top = (j_global + 1) % n; j_bottom = (j_global - 1 + n) % n; field_device_new[i_global * n + j_global] = field_device[i_global * n + j_global] + theta * (field_device[i_right * n + j_global] + field_device[i_left * n + j_global] + field_device[i_global * n + j_top] + field_device[i_global * n + j_bottom] - 4.0 * field_device[i_global * n + j_global]); } } } __global__ void diffusion_shared(double *field_device, double *field_device_new) { int i_global; int j_global; int i_shared; int j_shared; int i_left, i_right; int j_top, j_bottom; double field_register; __shared__ double field_shared[(NT + 2) * (NT + 2)]; i_global = blockDim.x * blockIdx.x + threadIdx.x; i_shared = threadIdx.x + 1; j_shared = threadIdx.y + 1; if(i_global < n) { for(j_global = threadIdx.y; j_global < n; j_global += NT) { //copy field from global to shared---------------------- field_register = field_device[i_global * n + j_global]; field_shared[i_shared * (NT + 2) + j_shared] = field_register; if(i_shared == 1) { i_left = (i_global - 1 + n) % n; field_shared[0 * (NT + 2) + j_shared] = field_device[i_left * n + j_global]; } else if(i_shared == NT) { i_right = (i_global + 1) % n; field_shared[(NT + 1) * (NT + 2) + j_shared] = field_device[i_right * n + j_global]; } if(j_shared == 1) { j_bottom = (j_global - 1 + n) % n; field_shared[i_shared * (NT + 2) + 0] = field_device[i_global * n + j_bottom]; } else if(j_shared == NT) { j_top = (j_global + 1) % n; field_shared[i_shared * (NT + 2) + (NT + 1)] = field_device[i_global * n + j_top]; } __syncthreads(); //calculate field evolution----------------------------- field_device_new[i_global * n + j_global] = field_register + theta * (field_shared[(i_shared + 1) * (NT + 2) + j_shared] + field_shared[(i_shared - 1) * (NT + 2) + j_shared] + field_shared[i_shared * (NT + 2) + (j_shared + 1)] + field_shared[i_shared * (NT + 2) + (j_shared - 1)] - 4.0 * field_register); } } } //Host functions---------------------------------------------------------------- void init_field(double *field_host, int n_host, int l_host) { int i; int j; double x; double y; double dx = l_host / (double)n_host; double dy = l_host / (double)n_host; double midst = l_host * 0.5; for(i = 0; i < n_host; i += 1) { x = (double)i * dx; for(j = 0; j < n_host; j += 1) { y = (double)j * dy; if((x > midst && y > midst) || (x < midst && y < midst)) { field_host[n_host * j + i] = 1.0; } else { field_host[n_host * j + i] = 0.0; } } } } void flip_ij(int *i, int *j) { int i_temp; i_temp = *i; *i = *j; *j = i_temp; } void print_field(FILE *file_write, double *field, int n, double l) { int i; int j; double x; 
double y;
    double d = l / (double)n;

    for (i = 0; i < n; i += 1) {
        x = (double)i * d;
        for (j = 0; j < n; j += 1) {
            y = (double)j * d;
            fprintf(file_write, "%f %f %f\n", x, y, field[i * n + j]);
        }
    }
}

void diffusion_host(double *field_host, double *field_host_new, int n_host, double theta_host) {
    int i;
    int j;
    int i_right, i_left;
    int j_top, j_bottom;

    for (i = 0; i < n_host; i += 1) {
        i_right = (i + 1) % n_host;
        i_left = (i - 1 + n_host) % n_host;
        for (j = 0; j < n_host; j += 1) {
            j_top = (j + 1) % n_host;
            j_bottom = (j - 1 + n_host) % n_host;
            field_host_new[i * n_host + j] = field_host[i * n_host + j]
                + theta_host * (field_host[i_right * n_host + j] + field_host[i_left * n_host + j]
                + field_host[i * n_host + j_top] + field_host[i * n_host + j_bottom]
                - 4.0 * field_host[i * n_host + j]);
        }
    }
}

double check_residue(double *field_host, double *field_device, int n_host) {
    int i;
    double residue = 0.0;

    for (i = 0; i < n_host * n_host; i += 1) {
        residue += (field_host[i] - field_device[i]) * (field_host[i] - field_device[i]);
    }
    return residue;
}

int main(void) {
    //declare variables--------------------------------------------------------------
    int i;
    int j;
    int k;
    int n_host;
    int n_square;
    int iteration;
    int n_blocks;
    double l_host;
    double theta_host;
    dim3 dim_threads;
    double *field_host[2];
    double *field_device[2];
    double *result_host;
    double *result_global_host;
    double *result_shared_host;
    FILE *file_write;
    char filename_write[256];
    clock_t start, end;

    //initialize---------------------------------------------------------------------
    //set variables----------------------------------------------------------
    n_host = N;
    n_square = N * N;
    l_host = L;
    theta_host = THETA;
    dim_threads.x = NT;
    dim_threads.y = NT;
    dim_threads.z = 1;
    n_blocks = (int)(ceil((double)n_host / NT));
    iteration = M;

    //allocate memories--------------------------------------------------------
    cudaMemcpyToSymbol(n, &n_host, sizeof(int), 0, cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(theta, &theta_host, sizeof(double), 0, cudaMemcpyHostToDevice);
    cudaHostAlloc((void **)&field_host[0], n_square * sizeof(double), cudaHostAllocMapped);
    cudaHostAlloc((void **)&field_host[1], n_square * sizeof(double), cudaHostAllocMapped);
    cudaHostAlloc((void **)&result_global_host, n_square * sizeof(double), cudaHostAllocMapped);
    cudaHostAlloc((void **)&result_shared_host, n_square * sizeof(double), cudaHostAllocMapped);
    cudaMalloc((void **)&field_device[0], n_square * sizeof(double));
    cudaMalloc((void **)&field_device[1], n_square * sizeof(double));
    result_host = (double *)malloc(n_square * sizeof(double));

    //calculate on CPU-----------------------------------------------------------------
    //initialize field----------------------------------------------------------
    init_field(field_host[0], n_host, l_host);
    start = clock();

    //iteration-----------------------------------------------------------------
    i = 0;
    j = 1;
    for (k = 0; k < iteration; k += 1) {
        diffusion_host(field_host[i], field_host[j], n_host, theta_host);
        flip_ij(&i, &j);
    }

    //save and print out--------------------------------------------------------
    memcpy(result_host, field_host[i], n_square * sizeof(double));
    end = clock();
    printf("host:%ld\n", end - start);
    /*sprintf(filename_write, "result_host.txt");
    file_write = fopen(filename_write, "w");
    print_field(file_write, result_host, n_host, l_host);
    fclose(file_write);*/

    //calculate using only global memory----------------------------------------------
    //initialize field----------------------------------------------------------
    init_field(field_host[0], n_host, l_host);
    start =
clock(); cudaMemcpy(field_device[0], field_host[0], n_square * sizeof(double), cudaMemcpyHostToDevice); //iteration------------------------------------------------------------- i = 0; j = 1; for(k = 0; k < iteration; k += 1) { diffusion_global<<<n_blocks, dim_threads>>>(field_device[i], field_device[j]); cudaDeviceSynchronize(); flip_ij(&i, &j); } //copy to host and print out-------------------------------------------- cudaMemcpy(result_global_host, field_device[i], n_square * sizeof(double), cudaMemcpyDeviceToHost); end = clock(); printf("global:%ld\n", end - start); /*sprintf(filename_write, "result_global.txt"); file_write = fopen(filename_write, "w"); print_field(file_write, result_global_host, n_host, l_host); fclose(file_write);*/ //calculate using shared memory------------------------------------------------- //initialize field------------------------------------------------------ init_field(field_host[0], n_host, l_host); start = clock(); cudaMemcpy(field_device[0], field_host[0], n_square * sizeof(double), cudaMemcpyHostToDevice); //iteration------------------------------------------------------------- i = 0; j = 1; for(k = 0; k < iteration; k += 1) { diffusion_shared<<<n_blocks, dim_threads>>>(field_device[i], field_device[j]); cudaDeviceSynchronize(); flip_ij(&i, &j); } //copy to host and print out-------------------------------------------- cudaMemcpy(result_shared_host, field_device[i], n_square * sizeof(double), cudaMemcpyDeviceToHost); end = clock(); printf("shared:%ld\n", end - start); /*sprintf(filename_write, "result_shared.txt"); file_write = fopen(filename_write, "w"); print_field(file_write, result_shared_host, n_host, l_host); fclose(file_write);*/ //check answers----------------------------------------------------------------- printf("answers\n"); printf("global:%f\n", check_residue(result_host, result_global_host, n_host)); printf("shared:%f\n", check_residue(result_host, result_shared_host, n_host)); //finalize---------------------------------------------------------------------- cudaFreeHost(field_host[0]); cudaFreeHost(field_host[1]); cudaFreeHost(result_global_host); cudaFreeHost(result_shared_host); cudaFree(field_device[0]); cudaFree(field_device[1]); free(result_host); return 0; }
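/*
 * The diffusion benchmark above times its GPU sections with clock(), which
 * measures host wall time around the synchronizing calls (copies included).
 * A common alternative is to time the kernels themselves with CUDA events,
 * as the separable-convolution example further below does. A minimal,
 * self-contained sketch of that pattern; the busy kernel and the problem
 * size are placeholders of mine, not taken from the original program:
 */
#include <stdio.h>
#include <cuda_runtime.h>

__global__ void busy(float *x, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = x[i] * 2.0f + 1.0f;
}

int main(void) {
    int n = 1 << 20;
    float *x;
    cudaMalloc((void**)&x, n * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);                  //enqueue start marker on the stream
    busy<<<(n + 255) / 256, 256>>>(x, n);
    cudaEventRecord(stop);                   //enqueue stop marker after the kernel
    cudaEventSynchronize(stop);              //wait until the stop marker has passed

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);  //elapsed device time between markers
    printf("kernel: %f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(x);
    return 0;
}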
278dc5a58d7bb0cbf189e5fa9c9fa97776211537.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> unsigned int filter_radius = 16; #define FILTER_LENGTH (2 * 16 + 1) #define ABS(val) ((val)<0.0 ? (-(val)) : (val)) #define accuracy 0.05 #define TYPE float #define cudaCheckError() { \ hipError_t e=hipGetLastError(); \ if(e!=hipSuccess) { \ printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(e)); \ exit(EXIT_FAILURE); \ } \ } //GPU code __constant__ TYPE d_Filter[FILTER_LENGTH]; __global__ void convolROWS(TYPE* d_Buffer, TYPE* d_Input, int imageH, int imageW, int filterR) { int k; int blockId = blockIdx.y * gridDim.x + blockIdx.x; int tx = threadIdx.y * blockDim.x + threadIdx.x; int threadId = blockId * (blockDim.x * blockDim.y) + tx; TYPE sum = 0; //tile = threadBlock = 1024 __shared__ TYPE s_input[1024]; //load to shared memory s_input[tx] = d_Input[threadId]; __syncthreads(); // wait for all shared data to load //result for (k = -filterR; k <= filterR; k++) { int d = threadIdx.x + k; if (d >= 0 && (d < imageW)) { sum += s_input[tx + k] * d_Filter[filterR - k]; } d_Buffer[threadId] = sum; } } __global__ void convolCOL(TYPE* d_Output_GPU, TYPE* d_Buffer, int imageH, int imageW, int filterR) { int k; int blockId = blockIdx.y * gridDim.x + blockIdx.x; int ty = threadIdx.x * blockDim.y + threadIdx.y; TYPE sum = 0; //tile = threadBlock = 1024 __shared__ TYPE s_input[1024]; // load to shared memory s_input[ty] = d_Buffer[threadIdx.y*imageW + blockId*blockDim.x + threadIdx.x]; __syncthreads(); // wait for all shared data to load // result for (k = -filterR; k <= filterR; k++) { int d = threadIdx.y + k; if (d >= 0 && (d < imageW)) { sum += s_input[ty + k] * d_Filter[filterR - k]; } d_Output_GPU[threadIdx.y*imageW + blockId*blockDim.x + threadIdx.x] = sum; } } //////////////////////////////////////////////////////////////////////////////// // Reference row convolution filter //////////////////////////////////////////////////////////////////////////////// void convolutionRowCPU(TYPE *h_Dst, TYPE *h_Src, TYPE *h_Filter, int imageW, int imageH, int filterR) { int x, y, k; for (y = 0; y < imageH; y++) { for (x = 0; x < imageW; x++) { TYPE sum = 0; for (k = -filterR; k <= filterR; k++) { int d = x + k; if (d >= 0 && d < imageW) { sum += h_Src[y * imageW + d] * h_Filter[filterR - k]; } h_Dst[y * imageW + x] = sum; } } } } //////////////////////////////////////////////////////////////////////////////// // Reference column convolution filter //////////////////////////////////////////////////////////////////////////////// void convolutionColumnCPU(TYPE *h_Dst, TYPE *h_Src, TYPE *h_Filter, int imageW, int imageH, int filterR) { int x, y, k; for (y = 0; y < imageH; y++) { for (x = 0; x < imageW; x++) { TYPE sum = 0; for (k = -filterR; k <= filterR; k++) { int d = y + k; if (d >= 0 && d < imageH) { sum += h_Src[d * imageW + x] * h_Filter[filterR - k]; } h_Dst[y * imageW + x] = sum; } } } } //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { TYPE *h_Filter, *h_Input, *h_Buffer, *h_OutputCPU, *h_OutputGPU, *d_Input, *d_Output_GPU, *d_Buffer; int imageW; int imageH; unsigned int i; //printf("Enter filter radius : "); //scanf("%d", &filter_radius); //filter_radius = 16; printf("Enter image size. 
Should be a power of two and greater than %d : ", FILTER_LENGTH);
    scanf("%d", &imageW);
    imageH = imageW;

    // printf("Image Width x Height = %i x %i\n\n", imageW, imageH);
    // printf("Allocating and initializing host arrays...\n");

    h_Filter = (TYPE *)malloc(FILTER_LENGTH * sizeof(TYPE));
    h_Input = (TYPE *)malloc(imageW * imageH * sizeof(TYPE));
    h_Buffer = (TYPE *)malloc(imageW * imageH * sizeof(TYPE));
    h_OutputCPU = (TYPE *)malloc(imageW * imageH * sizeof(TYPE));
    h_OutputGPU = (TYPE *)malloc(imageW * imageH * sizeof(TYPE));

    if (h_Filter == NULL || h_Input == NULL || h_Buffer == NULL || h_OutputCPU == NULL || h_OutputGPU == NULL) {
        printf("Malloc allocation problem on host, exiting...\n");
        return(1);
    }

    srand(200);

    for (i = 0; i < FILTER_LENGTH; i++) {
        h_Filter[i] = (TYPE)(rand() % 16);
    }

    for (i = 0; i < imageW * imageH; i++) {
        h_Input[i] = (TYPE)rand() / ((TYPE)RAND_MAX / 255) + (TYPE)rand() / (TYPE)RAND_MAX;
    }

    // comment out CPU code since it is correct
    /*
    printf("CPU computation...\n");
    clock_t begin = clock();
    convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH, filter_radius);
    convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH, filter_radius);
    clock_t end = clock();
    double cpu_time = (double)(end - begin) / CLOCKS_PER_SEC;
    printf("Cpu time: %lf\n", cpu_time);
    */

    hipMalloc((void**)&d_Input, imageW * imageH * sizeof(TYPE));
    hipMalloc((void**)&d_Output_GPU, imageW * imageH * sizeof(TYPE));
    hipMalloc((void**)&d_Buffer, imageW * imageH * sizeof(TYPE));

    if (d_Input == NULL || d_Output_GPU == NULL || d_Buffer == NULL) {
        printf("Malloc allocation problem on device, exiting...\n");
        return(1);
    }

    //TIME
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    float tot_time = 0, timer = 0;

    hipMemcpy(d_Input, h_Input, imageW * imageH * sizeof(TYPE), hipMemcpyHostToDevice);
    // d_Filter is in __constant__ memory
    hipMemcpyToSymbol(d_Filter, h_Filter, sizeof(TYPE) * FILTER_LENGTH);
    cudaCheckError();

    //block & grid dimensions
    int x_block, y_block, x_grid, y_grid;
    if (imageW < 1024) {
        x_block = imageW;
        y_block = 1024 / imageW;
        x_grid = 1;
        y_grid = (imageW * imageW) / 1024;
    } else {
        if (imageW == 1024) {
            x_block = 1024;
            y_block = 1;
            x_grid = 1;
            y_grid = imageW;
        } else {
            x_block = 1024;
            y_block = 1;
            x_grid = (imageW * imageW) / 1024;
            y_grid = imageH;
        }
    }

    dim3 grid(x_grid, y_grid);
    dim3 block(x_block, y_block);

    hipEventRecord(start);
    hipLaunchKernelGGL((convolROWS), dim3(grid), dim3(block), 0, 0, d_Buffer, d_Input, imageH, imageW, filter_radius);
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&timer, start, stop);
    printf("Kernel time for rows: %f\n", timer);
    tot_time += timer;

    //wait for the 1st kernel to finish
    hipDeviceSynchronize();
    //check for errors
    cudaCheckError();

    dim3 grid2(y_grid, x_grid);
    dim3 block2(y_block, x_block);

    hipEventRecord(start);
    hipLaunchKernelGGL((convolCOL), dim3(grid2), dim3(block2), 0, 0, d_Output_GPU, d_Buffer, imageH, imageW, filter_radius);
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&timer, start, stop);
    printf("Kernel time for col: %f\n", timer);
    tot_time += timer;

    //wait to finish
    hipDeviceSynchronize();
    //check for errors
    cudaCheckError();

    //copy output from device to host
    hipMemcpy(h_OutputGPU, d_Output_GPU, imageW * imageH * sizeof(TYPE), hipMemcpyDeviceToHost);
    printf("Total time for GPU: %f\n", tot_time);

    // comment out CPU/GPU comparison code since it is correct
    //compare
    /*
    TYPE max = 0.0;
    unsigned int counter = 0;
    for (i = 0; i < imageW * imageH; i++) {
        if (ABS(h_OutputGPU[i] - h_OutputCPU[i]) > max) {
            max =
ABS(h_OutputGPU[i] - h_OutputCPU[i]); printf("iteration= %d max=%10g \n",i, max); counter++; } } printf("for %d filter, max= %d, counter=%d\n", filter_radius, max, counter); */ // free all the allocated memory free(h_OutputCPU); free(h_OutputGPU); free(h_Buffer); free(h_Input); free(h_Filter); hipFree(d_Input); hipFree(d_Output_GPU); hipDeviceReset(); return 0; }
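/*
 * Both convolution versions above keep the filter taps in __constant__
 * memory (d_Filter, filled with cudaMemcpyToSymbol / hipMemcpyToSymbol):
 * every thread in a warp reads the same tap per loop iteration, which the
 * constant cache serves as a broadcast. A minimal, hedged sketch of that
 * pattern on its own; c_taps, TAPS and the kernel are illustrative names
 * of mine, not drop-in pieces of the program above:
 */
#include <cuda_runtime.h>

#define TAPS 33
__constant__ float c_taps[TAPS];

__global__ void scale_by_tap0(float *x, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= c_taps[0];   //broadcast read through the constant cache
}

void upload_taps(const float *host_taps) {
    //copy host data into the __constant__ symbol before any launch uses it
    cudaMemcpyToSymbol(c_taps, host_taps, TAPS * sizeof(float));
}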
278dc5a58d7bb0cbf189e5fa9c9fa97776211537.cu
#include <stdio.h> #include <stdlib.h> unsigned int filter_radius = 16; #define FILTER_LENGTH (2 * 16 + 1) #define ABS(val) ((val)<0.0 ? (-(val)) : (val)) #define accuracy 0.05 #define TYPE float #define cudaCheckError() { \ cudaError_t e=cudaGetLastError(); \ if(e!=cudaSuccess) { \ printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \ exit(EXIT_FAILURE); \ } \ } //GPU code __constant__ TYPE d_Filter[FILTER_LENGTH]; __global__ void convolROWS(TYPE* d_Buffer, TYPE* d_Input, int imageH, int imageW, int filterR) { int k; int blockId = blockIdx.y * gridDim.x + blockIdx.x; int tx = threadIdx.y * blockDim.x + threadIdx.x; int threadId = blockId * (blockDim.x * blockDim.y) + tx; TYPE sum = 0; //tile = threadBlock = 1024 __shared__ TYPE s_input[1024]; //load to shared memory s_input[tx] = d_Input[threadId]; __syncthreads(); // wait for all shared data to load //result for (k = -filterR; k <= filterR; k++) { int d = threadIdx.x + k; if (d >= 0 && (d < imageW)) { sum += s_input[tx + k] * d_Filter[filterR - k]; } d_Buffer[threadId] = sum; } } __global__ void convolCOL(TYPE* d_Output_GPU, TYPE* d_Buffer, int imageH, int imageW, int filterR) { int k; int blockId = blockIdx.y * gridDim.x + blockIdx.x; int ty = threadIdx.x * blockDim.y + threadIdx.y; TYPE sum = 0; //tile = threadBlock = 1024 __shared__ TYPE s_input[1024]; // load to shared memory s_input[ty] = d_Buffer[threadIdx.y*imageW + blockId*blockDim.x + threadIdx.x]; __syncthreads(); // wait for all shared data to load // result for (k = -filterR; k <= filterR; k++) { int d = threadIdx.y + k; if (d >= 0 && (d < imageW)) { sum += s_input[ty + k] * d_Filter[filterR - k]; } d_Output_GPU[threadIdx.y*imageW + blockId*blockDim.x + threadIdx.x] = sum; } } //////////////////////////////////////////////////////////////////////////////// // Reference row convolution filter //////////////////////////////////////////////////////////////////////////////// void convolutionRowCPU(TYPE *h_Dst, TYPE *h_Src, TYPE *h_Filter, int imageW, int imageH, int filterR) { int x, y, k; for (y = 0; y < imageH; y++) { for (x = 0; x < imageW; x++) { TYPE sum = 0; for (k = -filterR; k <= filterR; k++) { int d = x + k; if (d >= 0 && d < imageW) { sum += h_Src[y * imageW + d] * h_Filter[filterR - k]; } h_Dst[y * imageW + x] = sum; } } } } //////////////////////////////////////////////////////////////////////////////// // Reference column convolution filter //////////////////////////////////////////////////////////////////////////////// void convolutionColumnCPU(TYPE *h_Dst, TYPE *h_Src, TYPE *h_Filter, int imageW, int imageH, int filterR) { int x, y, k; for (y = 0; y < imageH; y++) { for (x = 0; x < imageW; x++) { TYPE sum = 0; for (k = -filterR; k <= filterR; k++) { int d = y + k; if (d >= 0 && d < imageH) { sum += h_Src[d * imageW + x] * h_Filter[filterR - k]; } h_Dst[y * imageW + x] = sum; } } } } //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { TYPE *h_Filter, *h_Input, *h_Buffer, *h_OutputCPU, *h_OutputGPU, *d_Input, *d_Output_GPU, *d_Buffer; int imageW; int imageH; unsigned int i; //printf("Enter filter radius : "); //scanf("%d", &filter_radius); //filter_radius = 16; printf("Enter image size. 
Should be a power of two and greater than %d : ", FILTER_LENGTH);
    scanf("%d", &imageW);
    imageH = imageW;

    // printf("Image Width x Height = %i x %i\n\n", imageW, imageH);
    // printf("Allocating and initializing host arrays...\n");

    h_Filter = (TYPE *)malloc(FILTER_LENGTH * sizeof(TYPE));
    h_Input = (TYPE *)malloc(imageW * imageH * sizeof(TYPE));
    h_Buffer = (TYPE *)malloc(imageW * imageH * sizeof(TYPE));
    h_OutputCPU = (TYPE *)malloc(imageW * imageH * sizeof(TYPE));
    h_OutputGPU = (TYPE *)malloc(imageW * imageH * sizeof(TYPE));

    if (h_Filter == NULL || h_Input == NULL || h_Buffer == NULL || h_OutputCPU == NULL || h_OutputGPU == NULL) {
        printf("Malloc allocation problem on host, exiting...\n");
        return(1);
    }

    srand(200);

    for (i = 0; i < FILTER_LENGTH; i++) {
        h_Filter[i] = (TYPE)(rand() % 16);
    }

    for (i = 0; i < imageW * imageH; i++) {
        h_Input[i] = (TYPE)rand() / ((TYPE)RAND_MAX / 255) + (TYPE)rand() / (TYPE)RAND_MAX;
    }

    // comment out CPU code since it is correct
    /*
    printf("CPU computation...\n");
    clock_t begin = clock();
    convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH, filter_radius);
    convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH, filter_radius);
    clock_t end = clock();
    double cpu_time = (double)(end - begin) / CLOCKS_PER_SEC;
    printf("Cpu time: %lf\n", cpu_time);
    */

    cudaMalloc((void**)&d_Input, imageW * imageH * sizeof(TYPE));
    cudaMalloc((void**)&d_Output_GPU, imageW * imageH * sizeof(TYPE));
    cudaMalloc((void**)&d_Buffer, imageW * imageH * sizeof(TYPE));

    if (d_Input == NULL || d_Output_GPU == NULL || d_Buffer == NULL) {
        printf("Malloc allocation problem on device, exiting...\n");
        return(1);
    }

    //TIME
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float tot_time = 0, timer = 0;

    cudaMemcpy(d_Input, h_Input, imageW * imageH * sizeof(TYPE), cudaMemcpyHostToDevice);
    // d_Filter is in __constant__ memory
    cudaMemcpyToSymbol(d_Filter, h_Filter, sizeof(TYPE) * FILTER_LENGTH);
    cudaCheckError();

    //block & grid dimensions
    int x_block, y_block, x_grid, y_grid;
    if (imageW < 1024) {
        x_block = imageW;
        y_block = 1024 / imageW;
        x_grid = 1;
        y_grid = (imageW * imageW) / 1024;
    } else {
        if (imageW == 1024) {
            x_block = 1024;
            y_block = 1;
            x_grid = 1;
            y_grid = imageW;
        } else {
            x_block = 1024;
            y_block = 1;
            x_grid = (imageW * imageW) / 1024;
            y_grid = imageH;
        }
    }

    dim3 grid(x_grid, y_grid);
    dim3 block(x_block, y_block);

    cudaEventRecord(start);
    convolROWS<<<grid, block>>>(d_Buffer, d_Input, imageH, imageW, filter_radius);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&timer, start, stop);
    printf("Kernel time for rows: %f\n", timer);
    tot_time += timer;

    //wait for the 1st kernel to finish
    cudaDeviceSynchronize();
    //check for errors
    cudaCheckError();

    dim3 grid2(y_grid, x_grid);
    dim3 block2(y_block, x_block);

    cudaEventRecord(start);
    convolCOL<<<grid2, block2>>>(d_Output_GPU, d_Buffer, imageH, imageW, filter_radius);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&timer, start, stop);
    printf("Kernel time for col: %f\n", timer);
    tot_time += timer;

    //wait to finish
    cudaDeviceSynchronize();
    //check for errors
    cudaCheckError();

    //copy output from device to host
    cudaMemcpy(h_OutputGPU, d_Output_GPU, imageW * imageH * sizeof(TYPE), cudaMemcpyDeviceToHost);
    printf("Total time for GPU: %f\n", tot_time);

    // comment out CPU/GPU comparison code since it is correct
    //compare
    /*
    TYPE max = 0.0;
    unsigned int counter = 0;
    for (i = 0; i < imageW * imageH; i++) {
        if (ABS(h_OutputGPU[i] - h_OutputCPU[i]) > max) {
            max = ABS(h_OutputGPU[i] - h_OutputCPU[i]);
            printf("iteration= %d max=%10g
\n",i, max); counter++; } } printf("for %d filter, max= %d, counter=%d\n", filter_radius, max, counter); */ // free all the allocated memory free(h_OutputCPU); free(h_OutputGPU); free(h_Buffer); free(h_Input); free(h_Filter); cudaFree(d_Input); cudaFree(d_Output_GPU); cudaDeviceReset(); return 0; }
187f58c0c15f35ea61a80173956b32a1af480c49.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma ([email protected]) // #include <ops/declarable/helpers/adjust_hue.h> #include <helpers/ConstantTadHelper.h> #include <helpers/PointersManager.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template <typename T> static void _CUDA_G adjustHueCuda(const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets, void* vz, const Nd4jLong *zShapeInfo, const Nd4jLong* zTadOffsets, const Nd4jLong numOfTads, const T delta, const int dimC) { const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ int rank; __shared__ Nd4jLong xDimCstride, zDimCstride; if (threadIdx.x == 0) { rank = shape::rank(xShapeInfo); xDimCstride = shape::stride(xShapeInfo)[dimC]; zDimCstride = shape::stride(zShapeInfo)[dimC]; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < numOfTads; i += gridDim.x * blockDim.x) { const T* xTad = x + xTadOffsets[i]; T* zTad = z + zTadOffsets[i]; T h, s, v; rgbToHsv<T>(xTad[0], xTad[xDimCstride], xTad[2 * xDimCstride], h, s, v); h += delta ; if(h > 1) h -= 1; else if(h < 0) h += 1; hsvToRgb<T>(h, s, v, zTad[0], zTad[zDimCstride], zTad[2 * zDimCstride]); } } /////////////////////////////////////////////////////////////////// template<typename T> static _CUDA_H void adjustHueCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zTadOffsets, const Nd4jLong numOfTads, const NDArray* deltaScalarArr, const int dimC) { hipLaunchKernelGGL(( adjustHueCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), 256, *stream, vx, xShapeInfo, xTadOffsets, vz, zShapeInfo, zTadOffsets, numOfTads, deltaScalarArr->e<T>(0), dimC); } //////////////////////////////////////////////////////////////////////// void adjustHue(sd::LaunchContext* context, const NDArray *input, const NDArray* deltaScalarArr, NDArray *output, const int dimC) { auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), {dimC}); auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), {dimC}); const Nd4jLong numOfTads = packX.numberOfTads(); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (numOfTads + threadsPerBlock - 1) / threadsPerBlock; PointersManager manager(context, "adjustHue"); NDArray::prepareSpecialUse({output}, {input, deltaScalarArr}); BUILD_SINGLE_SELECTOR(input->dataType(), adjustHueCudaLauncher, (blocksPerGrid, threadsPerBlock, 
context->getCudaStream(), input->getSpecialBuffer(), input->getSpecialShapeInfo(), packX.platformOffsets(), output->specialBuffer(), output->specialShapeInfo(), packZ.platformOffsets(), numOfTads, deltaScalarArr, dimC), FLOAT_TYPES); NDArray::registerSpecialUse({output}, {input, deltaScalarArr}); manager.synchronize(); } /* template <typename T> static void _CUDA_G adjustHueSingleNHWCKernel(void *xBuffer, Nd4jLong *xShapeInfo, void *zBuffer, Nd4jLong *zShapeInfo, Nd4jLong tuples, float delta) { int numChannels = 3; auto tid = threadIdx.x + blockIdx.x * blockDim.x; auto bIn = reinterpret_cast<T*>(xBuffer); auto bOut = reinterpret_cast<T*>(zBuffer); static const int kChannelRange = 6; for (Nd4jLong e = tid; e < tuples; e += blockDim.x * gridDim.x) { auto i = bIn + e * numChannels; auto o = bOut + e * numChannels; T h, v_min, v_max; helpers::rgb_to_hv(i[0], i[1], i[2], &h, &v_min, &v_max); h += delta * kChannelRange; while (h < (T) 0.) h += (T) kChannelRange; while (h >= (T) kChannelRange) h -= (T) kChannelRange; helpers::hv_to_rgb(h, v_min, v_max, o, o + 1, o + 2); } } template <typename T> static void _CUDA_G adjustHueSingleNCHWKernel(void *xBuffer, Nd4jLong *xTadShapeInfo, Nd4jLong *xOffsets, void *zBuffer, Nd4jLong *zTadShapeInfo, Nd4jLong *zOffsets, Nd4jLong tadLength, Nd4jLong tuples, float delta) { int numChannels = 3; auto tid = threadIdx.x + blockIdx.x * blockDim.x; static const int kChannelRange = 6; auto bufferR = reinterpret_cast<T *>(xBuffer) + xOffsets[0]; auto bufferG = reinterpret_cast<T *>(xBuffer) + xOffsets[1]; auto bufferB = reinterpret_cast<T *>(xBuffer) + xOffsets[2]; auto outputR = reinterpret_cast<T *>(zBuffer) + zOffsets[0]; auto outputG = reinterpret_cast<T *>(zBuffer) + zOffsets[1]; auto outputB = reinterpret_cast<T *>(zBuffer) + zOffsets[2]; for (Nd4jLong e = tid; e < tuples; e += blockDim.x * gridDim.x) { auto _ri = bufferR + shape::getIndexOffset(e, xTadShapeInfo); auto _gi = bufferG + shape::getIndexOffset(e, xTadShapeInfo); auto _bi = bufferB + shape::getIndexOffset(e, xTadShapeInfo); auto _ro = outputR + shape::getIndexOffset(e, xTadShapeInfo); auto _go = outputG + shape::getIndexOffset(e, xTadShapeInfo); auto _bo = outputB + shape::getIndexOffset(e, xTadShapeInfo); T h, v_min, v_max; helpers::rgb_to_hv(_ri[0], _gi[0], _bi[0], &h, &v_min, &v_max); h += delta * kChannelRange; while (h < (T) 0) h += (T) kChannelRange; while (h >= (T) kChannelRange) h -= (T) kChannelRange; helpers::hv_to_rgb(h, v_min, v_max, _ro, _go, _bo); } } template <typename T> static void _adjust_hue_single(sd::LaunchContext * context, NDArray *array, NDArray *output, float delta, bool isNHWC) { // numChannels is always 3 auto tuples = array->lengthOf() / 3; if (isNHWC) { adjustHueSingleNHWCKernel<T><<<256, 256, 1024, *context->getCudaStream()>>>(array->specialBuffer(), array->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), tuples, delta); } else { // TODO: check this one auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(array->getShapeInfo(), {1, 2}); auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), {1, 2}); auto tadLength = shape::length(packX.primaryShapeInfo()); adjustHueSingleNCHWKernel<T><<<256, 256, 1024, *context->getCudaStream()>>>(array->specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), tadLength, tuples, delta); } } template <typename T> static void _adjust_hue_batch(sd::LaunchContext * context, 
NDArray *array, NDArray *output, float delta, bool isNHWC) { auto xType = array->dataType(); // numChannels is always 3 auto tuples = array->lengthOf() / 3; if (isNHWC) { // in case of nhwc batch, we don't really care about examples: it's still bunch of RGB values BUILD_SINGLE_SELECTOR(xType, _adjust_hue_single, (context, array, output, delta, isNHWC);, FLOAT_TYPES); } else { // TODO: check this one auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(array->getShapeInfo(), {0, 2, 3}); auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), {0, 2, 3}); auto tadLength = shape::length(packX.primaryShapeInfo()); adjustHueSingleNCHWKernel<T><<<256, 256, 1024, *context->getCudaStream()>>>(array->specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), tadLength, tuples, delta); } } void _adjust_hue(sd::LaunchContext * context, NDArray *array, NDArray *output, NDArray* delta, bool isNHWC) { auto xType = array->dataType(); float d = delta->e<float>(0); if (array->rankOf() == 4) { BUILD_SINGLE_SELECTOR(xType, _adjust_hue_batch, (context, array, output, d, isNHWC);, FLOAT_TYPES); } else { BUILD_SINGLE_SELECTOR(xType, _adjust_hue_single, (context, array, output, d, isNHWC);, FLOAT_TYPES); } } */ } } }
187f58c0c15f35ea61a80173956b32a1af480c49.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma ([email protected]) // #include <ops/declarable/helpers/adjust_hue.h> #include <helpers/ConstantTadHelper.h> #include <helpers/PointersManager.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template <typename T> static void _CUDA_G adjustHueCuda(const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets, void* vz, const Nd4jLong *zShapeInfo, const Nd4jLong* zTadOffsets, const Nd4jLong numOfTads, const T delta, const int dimC) { const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ int rank; __shared__ Nd4jLong xDimCstride, zDimCstride; if (threadIdx.x == 0) { rank = shape::rank(xShapeInfo); xDimCstride = shape::stride(xShapeInfo)[dimC]; zDimCstride = shape::stride(zShapeInfo)[dimC]; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < numOfTads; i += gridDim.x * blockDim.x) { const T* xTad = x + xTadOffsets[i]; T* zTad = z + zTadOffsets[i]; T h, s, v; rgbToHsv<T>(xTad[0], xTad[xDimCstride], xTad[2 * xDimCstride], h, s, v); h += delta ; if(h > 1) h -= 1; else if(h < 0) h += 1; hsvToRgb<T>(h, s, v, zTad[0], zTad[zDimCstride], zTad[2 * zDimCstride]); } } /////////////////////////////////////////////////////////////////// template<typename T> static _CUDA_H void adjustHueCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zTadOffsets, const Nd4jLong numOfTads, const NDArray* deltaScalarArr, const int dimC) { adjustHueCuda<T><<<blocksPerGrid, threadsPerBlock, 256, *stream>>>(vx, xShapeInfo, xTadOffsets, vz, zShapeInfo, zTadOffsets, numOfTads, deltaScalarArr->e<T>(0), dimC); } //////////////////////////////////////////////////////////////////////// void adjustHue(sd::LaunchContext* context, const NDArray *input, const NDArray* deltaScalarArr, NDArray *output, const int dimC) { auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), {dimC}); auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), {dimC}); const Nd4jLong numOfTads = packX.numberOfTads(); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (numOfTads + threadsPerBlock - 1) / threadsPerBlock; PointersManager manager(context, "adjustHue"); NDArray::prepareSpecialUse({output}, {input, deltaScalarArr}); BUILD_SINGLE_SELECTOR(input->dataType(), adjustHueCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input->getSpecialBuffer(), input->getSpecialShapeInfo(), packX.platformOffsets(), 
output->specialBuffer(), output->specialShapeInfo(), packZ.platformOffsets(), numOfTads, deltaScalarArr, dimC), FLOAT_TYPES); NDArray::registerSpecialUse({output}, {input, deltaScalarArr}); manager.synchronize(); } /* template <typename T> static void _CUDA_G adjustHueSingleNHWCKernel(void *xBuffer, Nd4jLong *xShapeInfo, void *zBuffer, Nd4jLong *zShapeInfo, Nd4jLong tuples, float delta) { int numChannels = 3; auto tid = threadIdx.x + blockIdx.x * blockDim.x; auto bIn = reinterpret_cast<T*>(xBuffer); auto bOut = reinterpret_cast<T*>(zBuffer); static const int kChannelRange = 6; for (Nd4jLong e = tid; e < tuples; e += blockDim.x * gridDim.x) { auto i = bIn + e * numChannels; auto o = bOut + e * numChannels; T h, v_min, v_max; helpers::rgb_to_hv(i[0], i[1], i[2], &h, &v_min, &v_max); h += delta * kChannelRange; while (h < (T) 0.) h += (T) kChannelRange; while (h >= (T) kChannelRange) h -= (T) kChannelRange; helpers::hv_to_rgb(h, v_min, v_max, o, o + 1, o + 2); } } template <typename T> static void _CUDA_G adjustHueSingleNCHWKernel(void *xBuffer, Nd4jLong *xTadShapeInfo, Nd4jLong *xOffsets, void *zBuffer, Nd4jLong *zTadShapeInfo, Nd4jLong *zOffsets, Nd4jLong tadLength, Nd4jLong tuples, float delta) { int numChannels = 3; auto tid = threadIdx.x + blockIdx.x * blockDim.x; static const int kChannelRange = 6; auto bufferR = reinterpret_cast<T *>(xBuffer) + xOffsets[0]; auto bufferG = reinterpret_cast<T *>(xBuffer) + xOffsets[1]; auto bufferB = reinterpret_cast<T *>(xBuffer) + xOffsets[2]; auto outputR = reinterpret_cast<T *>(zBuffer) + zOffsets[0]; auto outputG = reinterpret_cast<T *>(zBuffer) + zOffsets[1]; auto outputB = reinterpret_cast<T *>(zBuffer) + zOffsets[2]; for (Nd4jLong e = tid; e < tuples; e += blockDim.x * gridDim.x) { auto _ri = bufferR + shape::getIndexOffset(e, xTadShapeInfo); auto _gi = bufferG + shape::getIndexOffset(e, xTadShapeInfo); auto _bi = bufferB + shape::getIndexOffset(e, xTadShapeInfo); auto _ro = outputR + shape::getIndexOffset(e, xTadShapeInfo); auto _go = outputG + shape::getIndexOffset(e, xTadShapeInfo); auto _bo = outputB + shape::getIndexOffset(e, xTadShapeInfo); T h, v_min, v_max; helpers::rgb_to_hv(_ri[0], _gi[0], _bi[0], &h, &v_min, &v_max); h += delta * kChannelRange; while (h < (T) 0) h += (T) kChannelRange; while (h >= (T) kChannelRange) h -= (T) kChannelRange; helpers::hv_to_rgb(h, v_min, v_max, _ro, _go, _bo); } } template <typename T> static void _adjust_hue_single(sd::LaunchContext * context, NDArray *array, NDArray *output, float delta, bool isNHWC) { // numChannels is always 3 auto tuples = array->lengthOf() / 3; if (isNHWC) { adjustHueSingleNHWCKernel<T><<<256, 256, 1024, *context->getCudaStream()>>>(array->specialBuffer(), array->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), tuples, delta); } else { // TODO: check this one auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(array->getShapeInfo(), {1, 2}); auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), {1, 2}); auto tadLength = shape::length(packX.primaryShapeInfo()); adjustHueSingleNCHWKernel<T><<<256, 256, 1024, *context->getCudaStream()>>>(array->specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), tadLength, tuples, delta); } } template <typename T> static void _adjust_hue_batch(sd::LaunchContext * context, NDArray *array, NDArray *output, float delta, bool isNHWC) { auto xType = array->dataType(); // numChannels is 
always 3 auto tuples = array->lengthOf() / 3; if (isNHWC) { // in case of nhwc batch, we don't really care about examples: it's still bunch of RGB values BUILD_SINGLE_SELECTOR(xType, _adjust_hue_single, (context, array, output, delta, isNHWC);, FLOAT_TYPES); } else { // TODO: check this one auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(array->getShapeInfo(), {0, 2, 3}); auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), {0, 2, 3}); auto tadLength = shape::length(packX.primaryShapeInfo()); adjustHueSingleNCHWKernel<T><<<256, 256, 1024, *context->getCudaStream()>>>(array->specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), tadLength, tuples, delta); } } void _adjust_hue(sd::LaunchContext * context, NDArray *array, NDArray *output, NDArray* delta, bool isNHWC) { auto xType = array->dataType(); float d = delta->e<float>(0); if (array->rankOf() == 4) { BUILD_SINGLE_SELECTOR(xType, _adjust_hue_batch, (context, array, output, d, isNHWC);, FLOAT_TYPES); } else { BUILD_SINGLE_SELECTOR(xType, _adjust_hue_single, (context, array, output, d, isNHWC);, FLOAT_TYPES); } } */ } } }
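/*
 * For reference, the per-pixel hue shift in adjustHueCuda above keeps h in
 * [0, 1) by wrapping once after adding delta; the rgbToHsv/hsvToRgb helpers
 * it calls live in adjust_hue.h and are not reproduced here. A host-side
 * scalar sketch of just the wrap step; hue_wrap is my own name:
 */
static inline double hue_wrap(double h, double delta) {
    h += delta;              //delta is expected to be small, roughly in [-1, 1]
    if (h > 1.0) h -= 1.0;   //single wrap, exactly as in the kernel
    else if (h < 0.0) h += 1.0;
    return h;
}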
894ca140de31790b057579cd0ff25c2eabff1533.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright 2009, Andrew Corrigan, [email protected] // This code is from the AIAA-2009-4001 paper #include <helper_cuda.h> #include <helper_timer.h> #include <iostream> #include <fstream> #if CUDART_VERSION < 3000 struct double3 { double x, y, z; }; #endif /* * Options * */ #define GAMMA 1.4 #define iterations 2000 #ifndef block_length #define block_length 128 #endif #define NDIM 3 #define NNB 4 #define RK 3 // 3rd order RK #define ff_mach 1.2 #define deg_angle_of_attack 0.0 /* * not options */ #if block_length > 128 #warning "the kernels may fail too launch on some systems if the block length is too large" #endif #define VAR_DENSITY 0 #define VAR_MOMENTUM 1 #define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM) #define NVAR (VAR_DENSITY_ENERGY+1) /* * Generic functions */ template <typename T> T* alloc(int N) { T* t; checkCudaErrors(hipMalloc((void**)&t, sizeof(T)*N)); return t; } template <typename T> void dealloc(T* array) { checkCudaErrors(hipFree((void*)array)); } template <typename T> void copy(T* dst, T* src, int N) { checkCudaErrors(hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyDeviceToDevice)); } template <typename T> void upload(T* dst, T* src, int N) { checkCudaErrors(hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyHostToDevice)); } template <typename T> void download(T* dst, T* src, int N) { checkCudaErrors(hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyDeviceToHost)); } void dump(double* variables, int nel, int nelr) { double* h_variables = new double[nelr*NVAR]; download(h_variables, variables, nelr*NVAR); { std::ofstream file("density"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY*nelr] << std::endl; } { std::ofstream file("momentum"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) { for(int j = 0; j != NDIM; j++) file << h_variables[i + (VAR_MOMENTUM+j)*nelr] << " "; file << std::endl; } } { std::ofstream file("density_energy"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY_ENERGY*nelr] << std::endl; } delete[] h_variables; } /* * Element-based Cell-centered FVM solver functions */ __constant__ double ff_variable[NVAR]; __constant__ double3 ff_fc_momentum_x[1]; __constant__ double3 ff_fc_momentum_y[1]; __constant__ double3 ff_fc_momentum_z[1]; __constant__ double3 ff_fc_density_energy[1]; __global__ void cuda_initialize_variables(int nelr, double* variables) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); for(int j = 0; j < NVAR; j++) variables[i + j*nelr] = ff_variable[j]; } void initialize_variables(int nelr, double* variables) { dim3 Dg(nelr / block_length), Db(block_length); hipError_t error; hipLaunchKernelGGL(( cuda_initialize_variables), dim3(Dg), dim3(Db), 0, 0, nelr, variables); error = hipGetLastError(); if (error != hipSuccess) { fprintf(stderr,"GPUassert: %s initialize variables \n", hipGetErrorString(error)); exit(-1); } } __device__ __host__ inline void compute_flux_contribution(double& density, double3& momentum, double& density_energy, double& pressure, double3& velocity, double3& fc_momentum_x, double3& fc_momentum_y, double3& fc_momentum_z, double3& fc_density_energy) { fc_momentum_x.x = velocity.x*momentum.x + pressure; fc_momentum_x.y = velocity.x*momentum.y; fc_momentum_x.z = velocity.x*momentum.z; fc_momentum_y.x = fc_momentum_x.y; fc_momentum_y.y = velocity.y*momentum.y + pressure; 
/*
 * Element-based Cell-centered FVM solver functions
 */
__constant__ double ff_variable[NVAR];
__constant__ double3 ff_fc_momentum_x[1];
__constant__ double3 ff_fc_momentum_y[1];
__constant__ double3 ff_fc_momentum_z[1];
__constant__ double3 ff_fc_density_energy[1];

__global__ void cuda_initialize_variables(int nelr, double* variables)
{
    const int i = (blockDim.x*blockIdx.x + threadIdx.x);
    for(int j = 0; j < NVAR; j++) variables[i + j*nelr] = ff_variable[j];
}

void initialize_variables(int nelr, double* variables)
{
    dim3 Dg(nelr / block_length), Db(block_length);
    hipError_t error;
    hipLaunchKernelGGL(cuda_initialize_variables, dim3(Dg), dim3(Db), 0, 0, nelr, variables);
    error = hipGetLastError();
    if (error != hipSuccess)
    {
        fprintf(stderr, "GPUassert: %s initialize variables \n", hipGetErrorString(error));
        exit(-1);
    }
}

__device__ __host__ inline void compute_flux_contribution(double& density, double3& momentum, double& density_energy,
                                                          double& pressure, double3& velocity,
                                                          double3& fc_momentum_x, double3& fc_momentum_y, double3& fc_momentum_z,
                                                          double3& fc_density_energy)
{
    fc_momentum_x.x = velocity.x*momentum.x + pressure;
    fc_momentum_x.y = velocity.x*momentum.y;
    fc_momentum_x.z = velocity.x*momentum.z;

    fc_momentum_y.x = fc_momentum_x.y;
    fc_momentum_y.y = velocity.y*momentum.y + pressure;
    fc_momentum_y.z = velocity.y*momentum.z;

    fc_momentum_z.x = fc_momentum_x.z;
    fc_momentum_z.y = fc_momentum_y.z;
    fc_momentum_z.z = velocity.z*momentum.z + pressure;

    double de_p = density_energy+pressure;
    fc_density_energy.x = velocity.x*de_p;
    fc_density_energy.y = velocity.y*de_p;
    fc_density_energy.z = velocity.z*de_p;
}

__device__ inline void compute_velocity(double& density, double3& momentum, double3& velocity)
{
    velocity.x = momentum.x / density;
    velocity.y = momentum.y / density;
    velocity.z = momentum.z / density;
}

__device__ inline double compute_speed_sqd(double3& velocity)
{
    return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z;
}

__device__ inline double compute_pressure(double& density, double& density_energy, double& speed_sqd)
{
    return (double(GAMMA)-double(1.0))*(density_energy - double(0.5)*density*speed_sqd);
}

__device__ inline double compute_speed_of_sound(double& density, double& pressure)
{
    return sqrt(double(GAMMA)*pressure/density);
}

__global__ void cuda_compute_step_factor(int nelr, double* variables, double* areas, double* step_factors)
{
    const int i = (blockDim.x*blockIdx.x + threadIdx.x);

    double density = variables[i + VAR_DENSITY*nelr];
    double3 momentum;
    momentum.x = variables[i + (VAR_MOMENTUM+0)*nelr];
    momentum.y = variables[i + (VAR_MOMENTUM+1)*nelr];
    momentum.z = variables[i + (VAR_MOMENTUM+2)*nelr];

    double density_energy = variables[i + VAR_DENSITY_ENERGY*nelr];

    double3 velocity;
    compute_velocity(density, momentum, velocity);
    double speed_sqd = compute_speed_sqd(velocity);
    double pressure = compute_pressure(density, density_energy, speed_sqd);
    double speed_of_sound = compute_speed_of_sound(density, pressure);

    // dt = double(0.5) * sqrt(areas[i]) / (||v|| + c).... but when we do time stepping, this later would need to be
    // divided by the area, so we just do it all at once
    step_factors[i] = double(0.5) / (sqrt(areas[i]) * (sqrt(speed_sqd) + speed_of_sound));
}

void compute_step_factor(int nelr, double* variables, double* areas, double* step_factors)
{
    hipError_t error;
    dim3 Dg(nelr / block_length), Db(block_length);
    hipLaunchKernelGGL(cuda_compute_step_factor, dim3(Dg), dim3(Db), 0, 0, nelr, variables, areas, step_factors);
    error = hipGetLastError();
    if (error != hipSuccess)
    {
        fprintf(stderr, "GPUassert: %s compute_step_factor failed\n", hipGetErrorString(error));
        exit(-1);
    }
}

__global__ void cuda_compute_flux_contributions(int nelr, double* variables, double* fc_momentum_x,
                                                double* fc_momentum_y, double* fc_momentum_z, double* fc_density_energy)
{
    const int i = (blockDim.x*blockIdx.x + threadIdx.x);

    double density_i = variables[i + VAR_DENSITY*nelr];
    double3 momentum_i;
    momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
    momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
    momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];

    double density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];

    double3 velocity_i;
    compute_velocity(density_i, momentum_i, velocity_i);
    double speed_sqd_i = compute_speed_sqd(velocity_i);
    double speed_i = sqrtf(speed_sqd_i);
    double pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
    double speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);

    double3 fc_i_momentum_x, fc_i_momentum_y, fc_i_momentum_z;
    double3 fc_i_density_energy;
    compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i,
                              fc_i_momentum_x, fc_i_momentum_y, fc_i_momentum_z, fc_i_density_energy);

    fc_momentum_x[i + 0*nelr] = fc_i_momentum_x.x;
    fc_momentum_x[i + 1*nelr] = fc_i_momentum_x.y;
    fc_momentum_x[i + 2*nelr] = fc_i_momentum_x.z;

    fc_momentum_y[i + 0*nelr] = fc_i_momentum_y.x;
    fc_momentum_y[i + 1*nelr] = fc_i_momentum_y.y;
    fc_momentum_y[i + 2*nelr] = fc_i_momentum_y.z;

    fc_momentum_z[i + 0*nelr] = fc_i_momentum_z.x;
    fc_momentum_z[i + 1*nelr] = fc_i_momentum_z.y;
    fc_momentum_z[i + 2*nelr] = fc_i_momentum_z.z;

    fc_density_energy[i + 0*nelr] = fc_i_density_energy.x;
    fc_density_energy[i + 1*nelr] = fc_i_density_energy.y;
    fc_density_energy[i + 2*nelr] = fc_i_density_energy.z;
}

void compute_flux_contributions(int nelr, double* variables, double* fc_momentum_x, double* fc_momentum_y,
                                double* fc_momentum_z, double* fc_density_energy)
{
    dim3 Dg(nelr / block_length), Db(block_length);
    hipError_t error;
    hipLaunchKernelGGL(cuda_compute_flux_contributions, dim3(Dg), dim3(Db), 0, 0, nelr, variables,
                       fc_momentum_x, fc_momentum_y, fc_momentum_z, fc_density_energy);
    error = hipGetLastError();
    if (error != hipSuccess)
    {
        fprintf(stderr, "GPUassert: %s compute_flux_contribution failed\n", hipGetErrorString(error));
        exit(-1);
    }
}

/*
 *
 */
__global__ void cuda_compute_flux(int nelr, int* elements_surrounding_elements, double* normals, double* variables,
                                  double* fc_momentum_x, double* fc_momentum_y, double* fc_momentum_z,
                                  double* fc_density_energy, double* fluxes)
{
    const double smoothing_coefficient = double(0.2f);
    const int i = (blockDim.x*blockIdx.x + threadIdx.x);

    int j, nb;
    double3 normal;
    double normal_len;
    double factor;

    double density_i = variables[i + VAR_DENSITY*nelr];
    double3 momentum_i;
    momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
    momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
    momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];

    double density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];

    double3 velocity_i;
    compute_velocity(density_i, momentum_i, velocity_i);
    double speed_sqd_i = compute_speed_sqd(velocity_i);
    double speed_i = sqrt(speed_sqd_i);
    double pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
    double speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);

    double3 fc_i_momentum_x, fc_i_momentum_y, fc_i_momentum_z;
    double3 fc_i_density_energy;
    fc_i_momentum_x.x = fc_momentum_x[i + 0*nelr];
    fc_i_momentum_x.y = fc_momentum_x[i + 1*nelr];
    fc_i_momentum_x.z = fc_momentum_x[i + 2*nelr];

    fc_i_momentum_y.x = fc_momentum_y[i + 0*nelr];
    fc_i_momentum_y.y = fc_momentum_y[i + 1*nelr];
    fc_i_momentum_y.z = fc_momentum_y[i + 2*nelr];

    fc_i_momentum_z.x = fc_momentum_z[i + 0*nelr];
    fc_i_momentum_z.y = fc_momentum_z[i + 1*nelr];
    fc_i_momentum_z.z = fc_momentum_z[i + 2*nelr];

    fc_i_density_energy.x = fc_density_energy[i + 0*nelr];
    fc_i_density_energy.y = fc_density_energy[i + 1*nelr];
    fc_i_density_energy.z = fc_density_energy[i + 2*nelr];

    double flux_i_density = double(0.0);
    double3 flux_i_momentum;
    flux_i_momentum.x = double(0.0);
    flux_i_momentum.y = double(0.0);
    flux_i_momentum.z = double(0.0);
    double flux_i_density_energy = double(0.0);

    double3 velocity_nb;
    double density_nb, density_energy_nb;
    double3 momentum_nb;
    double3 fc_nb_momentum_x, fc_nb_momentum_y, fc_nb_momentum_z;
    double3 fc_nb_density_energy;
    double speed_sqd_nb, speed_of_sound_nb, pressure_nb;

    #pragma unroll
    for(j = 0; j < NNB; j++)
    {
        nb = elements_surrounding_elements[i + j*nelr];
        normal.x = normals[i + (j + 0*NNB)*nelr];
        normal.y = normals[i + (j + 1*NNB)*nelr];
        normal.z = normals[i + (j + 2*NNB)*nelr];
        normal_len = sqrt(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);

        if(nb >= 0) // a legitimate neighbor
        {
            density_nb = variables[nb + VAR_DENSITY*nelr];
            momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
            momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
            momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
            density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
            compute_velocity(density_nb, momentum_nb, velocity_nb);
            speed_sqd_nb = compute_speed_sqd(velocity_nb);
            pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
            speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);

            fc_nb_momentum_x.x = fc_momentum_x[nb + 0*nelr];
            fc_nb_momentum_x.y = fc_momentum_x[nb + 1*nelr];
            fc_nb_momentum_x.z = fc_momentum_x[nb + 2*nelr];

            fc_nb_momentum_y.x = fc_momentum_y[nb + 0*nelr];
            fc_nb_momentum_y.y = fc_momentum_y[nb + 1*nelr];
            fc_nb_momentum_y.z = fc_momentum_y[nb + 2*nelr];

            fc_nb_momentum_z.x = fc_momentum_z[nb + 0*nelr];
            fc_nb_momentum_z.y = fc_momentum_z[nb + 1*nelr];
            fc_nb_momentum_z.z = fc_momentum_z[nb + 2*nelr];

            fc_nb_density_energy.x = fc_density_energy[nb + 0*nelr];
            fc_nb_density_energy.y = fc_density_energy[nb + 1*nelr];
            fc_nb_density_energy.z = fc_density_energy[nb + 2*nelr];

            // artificial viscosity
            factor = -normal_len*smoothing_coefficient*double(0.5)*(speed_i + sqrt(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
            flux_i_density += factor*(density_i-density_nb);
            flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
            flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
            flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
            flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);

            // accumulate cell-centered fluxes
            factor = double(0.5)*normal.x;
            flux_i_density += factor*(momentum_nb.x+momentum_i.x);
            flux_i_density_energy += factor*(fc_nb_density_energy.x+fc_i_density_energy.x);
            flux_i_momentum.x += factor*(fc_nb_momentum_x.x+fc_i_momentum_x.x);
            flux_i_momentum.y += factor*(fc_nb_momentum_y.x+fc_i_momentum_y.x);
            flux_i_momentum.z += factor*(fc_nb_momentum_z.x+fc_i_momentum_z.x);

            factor = double(0.5)*normal.y;
            flux_i_density += factor*(momentum_nb.y+momentum_i.y);
            flux_i_density_energy += factor*(fc_nb_density_energy.y+fc_i_density_energy.y);
            flux_i_momentum.x += factor*(fc_nb_momentum_x.y+fc_i_momentum_x.y);
            flux_i_momentum.y += factor*(fc_nb_momentum_y.y+fc_i_momentum_y.y);
            flux_i_momentum.z += factor*(fc_nb_momentum_z.y+fc_i_momentum_z.y);

            factor = double(0.5)*normal.z;
            flux_i_density += factor*(momentum_nb.z+momentum_i.z);
            flux_i_density_energy += factor*(fc_nb_density_energy.z+fc_i_density_energy.z);
            flux_i_momentum.x += factor*(fc_nb_momentum_x.z+fc_i_momentum_x.z);
            flux_i_momentum.y += factor*(fc_nb_momentum_y.z+fc_i_momentum_y.z);
            flux_i_momentum.z += factor*(fc_nb_momentum_z.z+fc_i_momentum_z.z);
        }
        else if(nb == -1) // a wing boundary
        {
            flux_i_momentum.x += normal.x*pressure_i;
            flux_i_momentum.y += normal.y*pressure_i;
            flux_i_momentum.z += normal.z*pressure_i;
        }
        else if(nb == -2) // a far field boundary
        {
            factor = double(0.5)*normal.x;
            flux_i_density += factor*(ff_variable[VAR_MOMENTUM+0]+momentum_i.x);
            flux_i_density_energy += factor*(ff_fc_density_energy[0].x+fc_i_density_energy.x);
            flux_i_momentum.x += factor*(ff_fc_momentum_x[0].x + fc_i_momentum_x.x);
            flux_i_momentum.y += factor*(ff_fc_momentum_y[0].x + fc_i_momentum_y.x);
            flux_i_momentum.z += factor*(ff_fc_momentum_z[0].x + fc_i_momentum_z.x);

            factor = double(0.5)*normal.y;
            flux_i_density += factor*(ff_variable[VAR_MOMENTUM+1]+momentum_i.y);
            flux_i_density_energy += factor*(ff_fc_density_energy[0].y+fc_i_density_energy.y);
            flux_i_momentum.x += factor*(ff_fc_momentum_x[0].y + fc_i_momentum_x.y);
            flux_i_momentum.y += factor*(ff_fc_momentum_y[0].y + fc_i_momentum_y.y);
            flux_i_momentum.z += factor*(ff_fc_momentum_z[0].y + fc_i_momentum_z.y);

            factor = double(0.5)*normal.z;
            flux_i_density += factor*(ff_variable[VAR_MOMENTUM+2]+momentum_i.z);
            flux_i_density_energy += factor*(ff_fc_density_energy[0].z+fc_i_density_energy.z);
            flux_i_momentum.x += factor*(ff_fc_momentum_x[0].z + fc_i_momentum_x.z);
            flux_i_momentum.y += factor*(ff_fc_momentum_y[0].z + fc_i_momentum_y.z);
            flux_i_momentum.z += factor*(ff_fc_momentum_z[0].z + fc_i_momentum_z.z);
        }
    }

    fluxes[i + VAR_DENSITY*nelr] = flux_i_density;
    fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x;
    fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y;
    fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z;
    fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;
}

void compute_flux(int nelr, int* elements_surrounding_elements, double* normals, double* variables,
                  double* fc_momentum_x, double* fc_momentum_y, double* fc_momentum_z,
                  double* fc_density_energy, double* fluxes)
{
    dim3 Dg(nelr / block_length), Db(block_length);
    hipError_t error;
    hipLaunchKernelGGL(cuda_compute_flux, dim3(Dg), dim3(Db), 0, 0, nelr, elements_surrounding_elements, normals,
                       variables, fc_momentum_x, fc_momentum_y, fc_momentum_z, fc_density_energy, fluxes);
    error = hipGetLastError();
    if (error != hipSuccess)
    {
        fprintf(stderr, "GPUassert: %s compute_flux failed\n", hipGetErrorString(error));
        exit(-1);
    }
}

__global__ void cuda_time_step(int j, int nelr, double* old_variables, double* variables, double* step_factors, double* fluxes)
{
    const int i = (blockDim.x*blockIdx.x + threadIdx.x);

    double factor = step_factors[i]/double(RK+1-j);

    variables[i + VAR_DENSITY*nelr] = old_variables[i + VAR_DENSITY*nelr] + factor*fluxes[i + VAR_DENSITY*nelr];
    variables[i + VAR_DENSITY_ENERGY*nelr] = old_variables[i + VAR_DENSITY_ENERGY*nelr] + factor*fluxes[i + VAR_DENSITY_ENERGY*nelr];
    variables[i + (VAR_MOMENTUM+0)*nelr] = old_variables[i + (VAR_MOMENTUM+0)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+0)*nelr];
    variables[i + (VAR_MOMENTUM+1)*nelr] = old_variables[i + (VAR_MOMENTUM+1)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+1)*nelr];
    variables[i + (VAR_MOMENTUM+2)*nelr] = old_variables[i + (VAR_MOMENTUM+2)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+2)*nelr];
}

void time_step(int j, int nelr, double* old_variables, double* variables, double* step_factors, double* fluxes)
{
    hipError_t error;
    dim3 Dg(nelr / block_length), Db(block_length);
    hipLaunchKernelGGL(cuda_time_step, dim3(Dg), dim3(Db), 0, 0, j, nelr, old_variables, variables, step_factors, fluxes);
    error = hipGetLastError();
    if (error != hipSuccess)
    {
        fprintf(stderr, "GPUassert: %s update failed\n", hipGetErrorString(error));
        exit(-1);
    }
}

/*
 * Main function
 */
int main(int argc, char** argv)
{
    if (argc < 2)
    {
        std::cout << "specify data file name" << std::endl;
        return 0;
    }
    const char* data_file_name = argv[1];

    // set far field conditions and load them into constant memory on the gpu
    {
        double h_ff_variable[NVAR];
        const double angle_of_attack = double(3.1415926535897931 / 180.0) * double(deg_angle_of_attack);

        h_ff_variable[VAR_DENSITY] = double(1.4);

        double ff_pressure = double(1.0);
        double ff_speed_of_sound = sqrt(GAMMA*ff_pressure / h_ff_variable[VAR_DENSITY]);
        double ff_speed = double(ff_mach)*ff_speed_of_sound;

        double3 ff_velocity;
        ff_velocity.x = ff_speed*double(cos((double)angle_of_attack));
        ff_velocity.y = ff_speed*double(sin((double)angle_of_attack));
        ff_velocity.z = 0.0;

        h_ff_variable[VAR_MOMENTUM+0] = h_ff_variable[VAR_DENSITY] * ff_velocity.x;
        h_ff_variable[VAR_MOMENTUM+1] = h_ff_variable[VAR_DENSITY] * ff_velocity.y;
        h_ff_variable[VAR_MOMENTUM+2] = h_ff_variable[VAR_DENSITY] * ff_velocity.z;

        h_ff_variable[VAR_DENSITY_ENERGY] = h_ff_variable[VAR_DENSITY]*(double(0.5)*(ff_speed*ff_speed)) + (ff_pressure / double(GAMMA-1.0));

        double3 h_ff_momentum;
        h_ff_momentum.x = *(h_ff_variable+VAR_MOMENTUM+0);
        h_ff_momentum.y = *(h_ff_variable+VAR_MOMENTUM+1);
        h_ff_momentum.z = *(h_ff_variable+VAR_MOMENTUM+2);
        double3 h_ff_fc_momentum_x;
        double3 h_ff_fc_momentum_y;
        double3 h_ff_fc_momentum_z;
        double3 h_ff_fc_density_energy;
        compute_flux_contribution(h_ff_variable[VAR_DENSITY], h_ff_momentum, h_ff_variable[VAR_DENSITY_ENERGY],
                                  ff_pressure, ff_velocity, h_ff_fc_momentum_x, h_ff_fc_momentum_y,
                                  h_ff_fc_momentum_z, h_ff_fc_density_energy);

        // copy far field conditions to the gpu
        checkCudaErrors( hipMemcpyToSymbol(ff_variable, h_ff_variable, NVAR*sizeof(double)) );
        checkCudaErrors( hipMemcpyToSymbol(ff_fc_momentum_x, &h_ff_fc_momentum_x, sizeof(double3)) );
        checkCudaErrors( hipMemcpyToSymbol(ff_fc_momentum_y, &h_ff_fc_momentum_y, sizeof(double3)) );
        checkCudaErrors( hipMemcpyToSymbol(ff_fc_momentum_z, &h_ff_fc_momentum_z, sizeof(double3)) );
        checkCudaErrors( hipMemcpyToSymbol(ff_fc_density_energy, &h_ff_fc_density_energy, sizeof(double3)) );
    }

    int nel;
    int nelr;

    // read in domain geometry
    double* areas;
    int* elements_surrounding_elements;
    double* normals;
    {
        std::ifstream file(data_file_name);

        file >> nel;
        nelr = block_length*((nel / block_length) + ::min(1, nel % block_length));

        double* h_areas = new double[nelr];
        int* h_elements_surrounding_elements = new int[nelr*NNB];
        double* h_normals = new double[nelr*NDIM*NNB];

        // read in data
        for(int i = 0; i < nel; i++)
        {
            file >> h_areas[i];
            for(int j = 0; j < NNB; j++)
            {
                file >> h_elements_surrounding_elements[i + j*nelr];
                if(h_elements_surrounding_elements[i+j*nelr] < 0) h_elements_surrounding_elements[i+j*nelr] = -1;
                h_elements_surrounding_elements[i + j*nelr]--; //it's coming in with Fortran numbering

                for(int k = 0; k < NDIM; k++)
                {
                    file >> h_normals[i + (j + k*NNB)*nelr];
                    h_normals[i + (j + k*NNB)*nelr] = -h_normals[i + (j + k*NNB)*nelr];
                }
            }
        }

        // fill in remaining data
        int last = nel-1;
        for(int i = nel; i < nelr; i++)
        {
            h_areas[i] = h_areas[last];
            for(int j = 0; j < NNB; j++)
            {
                // duplicate the last element
                h_elements_surrounding_elements[i + j*nelr] = h_elements_surrounding_elements[last + j*nelr];
                for(int k = 0; k < NDIM; k++) h_normals[i + (j + k*NNB)*nelr] = h_normals[last + (j + k*NNB)*nelr];
            }
        }

        areas = alloc<double>(nelr);
        upload<double>(areas, h_areas, nelr);

        elements_surrounding_elements = alloc<int>(nelr*NNB);
        upload<int>(elements_surrounding_elements, h_elements_surrounding_elements, nelr*NNB);

        normals = alloc<double>(nelr*NDIM*NNB);
        upload<double>(normals, h_normals, nelr*NDIM*NNB);

        delete[] h_areas;
        delete[] h_elements_surrounding_elements;
        delete[] h_normals;
    }

    // Create arrays and set initial conditions
    double* variables = alloc<double>(nelr*NVAR);
    initialize_variables(nelr, variables);

    double* old_variables = alloc<double>(nelr*NVAR);
    double* fluxes = alloc<double>(nelr*NVAR);
    double* step_factors = alloc<double>(nelr);
    double* fc_momentum_x = alloc<double>(nelr*NDIM);
    double* fc_momentum_y = alloc<double>(nelr*NDIM);
    double* fc_momentum_z = alloc<double>(nelr*NDIM);
    double* fc_density_energy = alloc<double>(nelr*NDIM);

    // make sure all memory is really allocated before we start timing
    initialize_variables(nelr, old_variables);
    initialize_variables(nelr, fluxes);
    hipMemset( (void*) step_factors, 0, sizeof(double)*nelr );

    // make sure CUDA isn't still doing something before we start timing
    hipDeviceSynchronize();

    // these need to be computed the first time in order to compute time step
    std::cout << "Starting..." << std::endl;

    StopWatchInterface *timer = NULL;
    sdkCreateTimer(&timer);
    sdkStartTimer(&timer);

    // Begin iterations
    for(int i = 0; i < iterations; i++)
    {
        copy<double>(old_variables, variables, nelr*NVAR);

        // for the first iteration we compute the time step
        compute_step_factor(nelr, variables, areas, step_factors);

        for(int j = 0; j < RK; j++)
        {
            compute_flux_contributions(nelr, variables, fc_momentum_x, fc_momentum_y, fc_momentum_z, fc_density_energy);
            compute_flux(nelr, elements_surrounding_elements, normals, variables, fc_momentum_x, fc_momentum_y,
                         fc_momentum_z, fc_density_energy, fluxes);
            time_step(j, nelr, old_variables, variables, step_factors, fluxes);
        }
    }

    hipDeviceSynchronize();
    sdkStopTimer(&timer);

    std::cout << (sdkGetAverageTimerValue(&timer)/1000.0) / iterations << " seconds per iteration" << std::endl;

    std::cout << "Saving solution..." << std::endl;
    dump(variables, nel, nelr);
    std::cout << "Saved solution..." << std::endl;

    std::cout << "Cleaning up..." << std::endl;
    dealloc<double>(areas);
    dealloc<int>(elements_surrounding_elements);
    dealloc<double>(normals);

    dealloc<double>(variables);
    dealloc<double>(old_variables);
    dealloc<double>(fluxes);
    dealloc<double>(step_factors);
    dealloc<double>(fc_momentum_x);
    dealloc<double>(fc_momentum_y);
    dealloc<double>(fc_momentum_z);
    dealloc<double>(fc_density_energy);

    std::cout << "Done..." << std::endl;

    return 0;
}
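Editor's sketch (hypothetical names, not part of either file): main() above rounds the element count up to a multiple of block_length so every kernel launches with whole blocks; the `min(1, nel % block_length)` term is simply a branch-free way of adding one extra block when the count is not already aligned. A minimal standalone demonstration of the same arithmetic:

#include <cstdio>

#define DEMO_BLOCK 128

// same rounding rule as: nelr = block_length*((nel / block_length) + min(1, nel % block_length))
static int demo_pad(int nel)
{
    return DEMO_BLOCK * ((nel / DEMO_BLOCK) + (nel % DEMO_BLOCK ? 1 : 0));
}

int main()
{
    std::printf("%d\n", demo_pad(200)); // 256: one full block plus one padded block
    std::printf("%d\n", demo_pad(256)); // 256: already a multiple of the block length
    return 0;
}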
894ca140de31790b057579cd0ff25c2eabff1533.cu
// Copyright 2009, Andrew Corrigan, [email protected]
// This code is from the AIAA-2009-4001 paper

#include <helper_cuda.h>
#include <helper_timer.h>
#include <iostream>
#include <fstream>

#if CUDART_VERSION < 3000
struct double3
{
    double x, y, z;
};
#endif

/*
 * Options
 *
 */
#define GAMMA 1.4
#define iterations 2000

#ifndef block_length
#define block_length 128
#endif

#define NDIM 3
#define NNB 4

#define RK 3 // 3rd order RK

#define ff_mach 1.2
#define deg_angle_of_attack 0.0

/*
 * not options
 */
#if block_length > 128
#warning "the kernels may fail to launch on some systems if the block length is too large"
#endif

#define VAR_DENSITY 0
#define VAR_MOMENTUM 1
#define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM)
#define NVAR (VAR_DENSITY_ENERGY+1)

/*
 * Generic functions
 */
template <typename T>
T* alloc(int N)
{
    T* t;
    checkCudaErrors(cudaMalloc((void**)&t, sizeof(T)*N));
    return t;
}

template <typename T>
void dealloc(T* array)
{
    checkCudaErrors(cudaFree((void*)array));
}

template <typename T>
void copy(T* dst, T* src, int N)
{
    checkCudaErrors(cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyDeviceToDevice));
}

template <typename T>
void upload(T* dst, T* src, int N)
{
    checkCudaErrors(cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyHostToDevice));
}

template <typename T>
void download(T* dst, T* src, int N)
{
    checkCudaErrors(cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyDeviceToHost));
}

void dump(double* variables, int nel, int nelr)
{
    double* h_variables = new double[nelr*NVAR];
    download(h_variables, variables, nelr*NVAR);

    {
        std::ofstream file("density");
        file << nel << " " << nelr << std::endl;
        for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY*nelr] << std::endl;
    }

    {
        std::ofstream file("momentum");
        file << nel << " " << nelr << std::endl;
        for(int i = 0; i < nel; i++)
        {
            for(int j = 0; j != NDIM; j++) file << h_variables[i + (VAR_MOMENTUM+j)*nelr] << " ";
            file << std::endl;
        }
    }

    {
        std::ofstream file("density_energy");
        file << nel << " " << nelr << std::endl;
        for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY_ENERGY*nelr] << std::endl;
    }

    delete[] h_variables;
}

/*
 * Element-based Cell-centered FVM solver functions
 */
__constant__ double ff_variable[NVAR];
__constant__ double3 ff_fc_momentum_x[1];
__constant__ double3 ff_fc_momentum_y[1];
__constant__ double3 ff_fc_momentum_z[1];
__constant__ double3 ff_fc_density_energy[1];

__global__ void cuda_initialize_variables(int nelr, double* variables)
{
    const int i = (blockDim.x*blockIdx.x + threadIdx.x);
    for(int j = 0; j < NVAR; j++) variables[i + j*nelr] = ff_variable[j];
}

void initialize_variables(int nelr, double* variables)
{
    dim3 Dg(nelr / block_length), Db(block_length);
    cudaError_t error;
    cuda_initialize_variables<<<Dg, Db>>>(nelr, variables);
    error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s initialize variables \n", cudaGetErrorString(error));
        exit(-1);
    }
}

__device__ __host__ inline void compute_flux_contribution(double& density, double3& momentum, double& density_energy,
                                                          double& pressure, double3& velocity,
                                                          double3& fc_momentum_x, double3& fc_momentum_y, double3& fc_momentum_z,
                                                          double3& fc_density_energy)
{
    fc_momentum_x.x = velocity.x*momentum.x + pressure;
    fc_momentum_x.y = velocity.x*momentum.y;
    fc_momentum_x.z = velocity.x*momentum.z;

    fc_momentum_y.x = fc_momentum_x.y;
    fc_momentum_y.y = velocity.y*momentum.y + pressure;
    fc_momentum_y.z = velocity.y*momentum.z;

    fc_momentum_z.x = fc_momentum_x.z;
    fc_momentum_z.y = fc_momentum_y.z;
    fc_momentum_z.z = velocity.z*momentum.z + pressure;

    double de_p = density_energy+pressure;
    fc_density_energy.x = velocity.x*de_p;
    fc_density_energy.y = velocity.y*de_p;
    fc_density_energy.z = velocity.z*de_p;
}

__device__ inline void compute_velocity(double& density, double3& momentum, double3& velocity)
{
    velocity.x = momentum.x / density;
    velocity.y = momentum.y / density;
    velocity.z = momentum.z / density;
}

__device__ inline double compute_speed_sqd(double3& velocity)
{
    return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z;
}

__device__ inline double compute_pressure(double& density, double& density_energy, double& speed_sqd)
{
    return (double(GAMMA)-double(1.0))*(density_energy - double(0.5)*density*speed_sqd);
}

__device__ inline double compute_speed_of_sound(double& density, double& pressure)
{
    return sqrt(double(GAMMA)*pressure/density);
}

__global__ void cuda_compute_step_factor(int nelr, double* variables, double* areas, double* step_factors)
{
    const int i = (blockDim.x*blockIdx.x + threadIdx.x);

    double density = variables[i + VAR_DENSITY*nelr];
    double3 momentum;
    momentum.x = variables[i + (VAR_MOMENTUM+0)*nelr];
    momentum.y = variables[i + (VAR_MOMENTUM+1)*nelr];
    momentum.z = variables[i + (VAR_MOMENTUM+2)*nelr];

    double density_energy = variables[i + VAR_DENSITY_ENERGY*nelr];

    double3 velocity;
    compute_velocity(density, momentum, velocity);
    double speed_sqd = compute_speed_sqd(velocity);
    double pressure = compute_pressure(density, density_energy, speed_sqd);
    double speed_of_sound = compute_speed_of_sound(density, pressure);

    // dt = double(0.5) * sqrt(areas[i]) / (||v|| + c).... but when we do time stepping, this later would need to be
    // divided by the area, so we just do it all at once
    step_factors[i] = double(0.5) / (sqrt(areas[i]) * (sqrt(speed_sqd) + speed_of_sound));
}

void compute_step_factor(int nelr, double* variables, double* areas, double* step_factors)
{
    cudaError_t error;
    dim3 Dg(nelr / block_length), Db(block_length);
    cuda_compute_step_factor<<<Dg, Db>>>(nelr, variables, areas, step_factors);
    error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s compute_step_factor failed\n", cudaGetErrorString(error));
        exit(-1);
    }
}

__global__ void cuda_compute_flux_contributions(int nelr, double* variables, double* fc_momentum_x,
                                                double* fc_momentum_y, double* fc_momentum_z, double* fc_density_energy)
{
    const int i = (blockDim.x*blockIdx.x + threadIdx.x);

    double density_i = variables[i + VAR_DENSITY*nelr];
    double3 momentum_i;
    momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
    momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
    momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];

    double density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];

    double3 velocity_i;
    compute_velocity(density_i, momentum_i, velocity_i);
    double speed_sqd_i = compute_speed_sqd(velocity_i);
    double speed_i = sqrtf(speed_sqd_i);
    double pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
    double speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);

    double3 fc_i_momentum_x, fc_i_momentum_y, fc_i_momentum_z;
    double3 fc_i_density_energy;
    compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i,
                              fc_i_momentum_x, fc_i_momentum_y, fc_i_momentum_z, fc_i_density_energy);

    fc_momentum_x[i + 0*nelr] = fc_i_momentum_x.x;
    fc_momentum_x[i + 1*nelr] = fc_i_momentum_x.y;
    fc_momentum_x[i + 2*nelr] = fc_i_momentum_x.z;

    fc_momentum_y[i + 0*nelr] = fc_i_momentum_y.x;
    fc_momentum_y[i + 1*nelr] = fc_i_momentum_y.y;
    fc_momentum_y[i + 2*nelr] = fc_i_momentum_y.z;

    fc_momentum_z[i + 0*nelr] = fc_i_momentum_z.x;
    fc_momentum_z[i + 1*nelr] = fc_i_momentum_z.y;
    fc_momentum_z[i + 2*nelr] = fc_i_momentum_z.z;

    fc_density_energy[i + 0*nelr] = fc_i_density_energy.x;
    fc_density_energy[i + 1*nelr] = fc_i_density_energy.y;
    fc_density_energy[i + 2*nelr] = fc_i_density_energy.z;
}

void compute_flux_contributions(int nelr, double* variables, double* fc_momentum_x, double* fc_momentum_y,
                                double* fc_momentum_z, double* fc_density_energy)
{
    dim3 Dg(nelr / block_length), Db(block_length);
    cudaError_t error;
    cuda_compute_flux_contributions<<<Dg,Db>>>(nelr, variables, fc_momentum_x, fc_momentum_y, fc_momentum_z, fc_density_energy);
    error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s compute_flux_contribution failed\n", cudaGetErrorString(error));
        exit(-1);
    }
}

/*
 *
 */
__global__ void cuda_compute_flux(int nelr, int* elements_surrounding_elements, double* normals, double* variables,
                                  double* fc_momentum_x, double* fc_momentum_y, double* fc_momentum_z,
                                  double* fc_density_energy, double* fluxes)
{
    const double smoothing_coefficient = double(0.2f);
    const int i = (blockDim.x*blockIdx.x + threadIdx.x);

    int j, nb;
    double3 normal;
    double normal_len;
    double factor;

    double density_i = variables[i + VAR_DENSITY*nelr];
    double3 momentum_i;
    momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
    momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
    momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];

    double density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];

    double3 velocity_i;
    compute_velocity(density_i, momentum_i, velocity_i);
    double speed_sqd_i = compute_speed_sqd(velocity_i);
    double speed_i = sqrt(speed_sqd_i);
    double pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
    double speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);

    double3 fc_i_momentum_x, fc_i_momentum_y, fc_i_momentum_z;
    double3 fc_i_density_energy;
    fc_i_momentum_x.x = fc_momentum_x[i + 0*nelr];
    fc_i_momentum_x.y = fc_momentum_x[i + 1*nelr];
    fc_i_momentum_x.z = fc_momentum_x[i + 2*nelr];

    fc_i_momentum_y.x = fc_momentum_y[i + 0*nelr];
    fc_i_momentum_y.y = fc_momentum_y[i + 1*nelr];
    fc_i_momentum_y.z = fc_momentum_y[i + 2*nelr];

    fc_i_momentum_z.x = fc_momentum_z[i + 0*nelr];
    fc_i_momentum_z.y = fc_momentum_z[i + 1*nelr];
    fc_i_momentum_z.z = fc_momentum_z[i + 2*nelr];

    fc_i_density_energy.x = fc_density_energy[i + 0*nelr];
    fc_i_density_energy.y = fc_density_energy[i + 1*nelr];
    fc_i_density_energy.z = fc_density_energy[i + 2*nelr];

    double flux_i_density = double(0.0);
    double3 flux_i_momentum;
    flux_i_momentum.x = double(0.0);
    flux_i_momentum.y = double(0.0);
    flux_i_momentum.z = double(0.0);
    double flux_i_density_energy = double(0.0);

    double3 velocity_nb;
    double density_nb, density_energy_nb;
    double3 momentum_nb;
    double3 fc_nb_momentum_x, fc_nb_momentum_y, fc_nb_momentum_z;
    double3 fc_nb_density_energy;
    double speed_sqd_nb, speed_of_sound_nb, pressure_nb;

    #pragma unroll
    for(j = 0; j < NNB; j++)
    {
        nb = elements_surrounding_elements[i + j*nelr];
        normal.x = normals[i + (j + 0*NNB)*nelr];
        normal.y = normals[i + (j + 1*NNB)*nelr];
        normal.z = normals[i + (j + 2*NNB)*nelr];
        normal_len = sqrt(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);

        if(nb >= 0) // a legitimate neighbor
        {
            density_nb = variables[nb + VAR_DENSITY*nelr];
            momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
            momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
            momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
            density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
            compute_velocity(density_nb, momentum_nb, velocity_nb);
            speed_sqd_nb = compute_speed_sqd(velocity_nb);
            pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
            speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);

            fc_nb_momentum_x.x = fc_momentum_x[nb + 0*nelr];
            fc_nb_momentum_x.y = fc_momentum_x[nb + 1*nelr];
            fc_nb_momentum_x.z = fc_momentum_x[nb + 2*nelr];

            fc_nb_momentum_y.x = fc_momentum_y[nb + 0*nelr];
            fc_nb_momentum_y.y = fc_momentum_y[nb + 1*nelr];
            fc_nb_momentum_y.z = fc_momentum_y[nb + 2*nelr];

            fc_nb_momentum_z.x = fc_momentum_z[nb + 0*nelr];
            fc_nb_momentum_z.y = fc_momentum_z[nb + 1*nelr];
            fc_nb_momentum_z.z = fc_momentum_z[nb + 2*nelr];

            fc_nb_density_energy.x = fc_density_energy[nb + 0*nelr];
            fc_nb_density_energy.y = fc_density_energy[nb + 1*nelr];
            fc_nb_density_energy.z = fc_density_energy[nb + 2*nelr];

            // artificial viscosity
            factor = -normal_len*smoothing_coefficient*double(0.5)*(speed_i + sqrt(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
            flux_i_density += factor*(density_i-density_nb);
            flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
            flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
            flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
            flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);

            // accumulate cell-centered fluxes
            factor = double(0.5)*normal.x;
            flux_i_density += factor*(momentum_nb.x+momentum_i.x);
            flux_i_density_energy += factor*(fc_nb_density_energy.x+fc_i_density_energy.x);
            flux_i_momentum.x += factor*(fc_nb_momentum_x.x+fc_i_momentum_x.x);
            flux_i_momentum.y += factor*(fc_nb_momentum_y.x+fc_i_momentum_y.x);
            flux_i_momentum.z += factor*(fc_nb_momentum_z.x+fc_i_momentum_z.x);

            factor = double(0.5)*normal.y;
            flux_i_density += factor*(momentum_nb.y+momentum_i.y);
            flux_i_density_energy += factor*(fc_nb_density_energy.y+fc_i_density_energy.y);
            flux_i_momentum.x += factor*(fc_nb_momentum_x.y+fc_i_momentum_x.y);
            flux_i_momentum.y += factor*(fc_nb_momentum_y.y+fc_i_momentum_y.y);
            flux_i_momentum.z += factor*(fc_nb_momentum_z.y+fc_i_momentum_z.y);

            factor = double(0.5)*normal.z;
            flux_i_density += factor*(momentum_nb.z+momentum_i.z);
            flux_i_density_energy += factor*(fc_nb_density_energy.z+fc_i_density_energy.z);
            flux_i_momentum.x += factor*(fc_nb_momentum_x.z+fc_i_momentum_x.z);
            flux_i_momentum.y += factor*(fc_nb_momentum_y.z+fc_i_momentum_y.z);
            flux_i_momentum.z += factor*(fc_nb_momentum_z.z+fc_i_momentum_z.z);
        }
        else if(nb == -1) // a wing boundary
        {
            flux_i_momentum.x += normal.x*pressure_i;
            flux_i_momentum.y += normal.y*pressure_i;
            flux_i_momentum.z += normal.z*pressure_i;
        }
        else if(nb == -2) // a far field boundary
        {
            factor = double(0.5)*normal.x;
            flux_i_density += factor*(ff_variable[VAR_MOMENTUM+0]+momentum_i.x);
            flux_i_density_energy += factor*(ff_fc_density_energy[0].x+fc_i_density_energy.x);
            flux_i_momentum.x += factor*(ff_fc_momentum_x[0].x + fc_i_momentum_x.x);
            flux_i_momentum.y += factor*(ff_fc_momentum_y[0].x + fc_i_momentum_y.x);
            flux_i_momentum.z += factor*(ff_fc_momentum_z[0].x + fc_i_momentum_z.x);

            factor = double(0.5)*normal.y;
            flux_i_density += factor*(ff_variable[VAR_MOMENTUM+1]+momentum_i.y);
            flux_i_density_energy += factor*(ff_fc_density_energy[0].y+fc_i_density_energy.y);
            flux_i_momentum.x += factor*(ff_fc_momentum_x[0].y + fc_i_momentum_x.y);
            flux_i_momentum.y += factor*(ff_fc_momentum_y[0].y + fc_i_momentum_y.y);
            flux_i_momentum.z += factor*(ff_fc_momentum_z[0].y + fc_i_momentum_z.y);

            factor = double(0.5)*normal.z;
            flux_i_density += factor*(ff_variable[VAR_MOMENTUM+2]+momentum_i.z);
            flux_i_density_energy += factor*(ff_fc_density_energy[0].z+fc_i_density_energy.z);
            flux_i_momentum.x += factor*(ff_fc_momentum_x[0].z + fc_i_momentum_x.z);
            flux_i_momentum.y += factor*(ff_fc_momentum_y[0].z + fc_i_momentum_y.z);
            flux_i_momentum.z += factor*(ff_fc_momentum_z[0].z + fc_i_momentum_z.z);
        }
    }

    fluxes[i + VAR_DENSITY*nelr] = flux_i_density;
    fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x;
    fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y;
    fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z;
    fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;
}

void compute_flux(int nelr, int* elements_surrounding_elements, double* normals, double* variables,
                  double* fc_momentum_x, double* fc_momentum_y, double* fc_momentum_z,
                  double* fc_density_energy, double* fluxes)
{
    dim3 Dg(nelr / block_length), Db(block_length);
    cudaError_t error;
    cuda_compute_flux<<<Dg,Db>>>(nelr, elements_surrounding_elements, normals, variables, fc_momentum_x,
                                 fc_momentum_y, fc_momentum_z, fc_density_energy, fluxes);
    error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s compute_flux failed\n", cudaGetErrorString(error));
        exit(-1);
    }
}

__global__ void cuda_time_step(int j, int nelr, double* old_variables, double* variables, double* step_factors, double* fluxes)
{
    const int i = (blockDim.x*blockIdx.x + threadIdx.x);

    double factor = step_factors[i]/double(RK+1-j);

    variables[i + VAR_DENSITY*nelr] = old_variables[i + VAR_DENSITY*nelr] + factor*fluxes[i + VAR_DENSITY*nelr];
    variables[i + VAR_DENSITY_ENERGY*nelr] = old_variables[i + VAR_DENSITY_ENERGY*nelr] + factor*fluxes[i + VAR_DENSITY_ENERGY*nelr];
    variables[i + (VAR_MOMENTUM+0)*nelr] = old_variables[i + (VAR_MOMENTUM+0)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+0)*nelr];
    variables[i + (VAR_MOMENTUM+1)*nelr] = old_variables[i + (VAR_MOMENTUM+1)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+1)*nelr];
    variables[i + (VAR_MOMENTUM+2)*nelr] = old_variables[i + (VAR_MOMENTUM+2)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+2)*nelr];
}

void time_step(int j, int nelr, double* old_variables, double* variables, double* step_factors, double* fluxes)
{
    cudaError_t error;
    dim3 Dg(nelr / block_length), Db(block_length);
    cuda_time_step<<<Dg,Db>>>(j, nelr, old_variables, variables, step_factors, fluxes);
    error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s update failed\n", cudaGetErrorString(error));
        exit(-1);
    }
}

/*
 * Main function
 */
int main(int argc, char** argv)
{
    if (argc < 2)
    {
        std::cout << "specify data file name" << std::endl;
        return 0;
    }
    const char* data_file_name = argv[1];

    // set far field conditions and load them into constant memory on the gpu
    {
        double h_ff_variable[NVAR];
        const double angle_of_attack = double(3.1415926535897931 / 180.0) * double(deg_angle_of_attack);

        h_ff_variable[VAR_DENSITY] = double(1.4);

        double ff_pressure = double(1.0);
        double ff_speed_of_sound = sqrt(GAMMA*ff_pressure / h_ff_variable[VAR_DENSITY]);
        double ff_speed = double(ff_mach)*ff_speed_of_sound;

        double3 ff_velocity;
        ff_velocity.x = ff_speed*double(cos((double)angle_of_attack));
        ff_velocity.y = ff_speed*double(sin((double)angle_of_attack));
        ff_velocity.z = 0.0;

        h_ff_variable[VAR_MOMENTUM+0] = h_ff_variable[VAR_DENSITY] * ff_velocity.x;
        h_ff_variable[VAR_MOMENTUM+1] = h_ff_variable[VAR_DENSITY] * ff_velocity.y;
        h_ff_variable[VAR_MOMENTUM+2] = h_ff_variable[VAR_DENSITY] * ff_velocity.z;

        h_ff_variable[VAR_DENSITY_ENERGY] = h_ff_variable[VAR_DENSITY]*(double(0.5)*(ff_speed*ff_speed)) + (ff_pressure / double(GAMMA-1.0));

        double3 h_ff_momentum;
        h_ff_momentum.x = *(h_ff_variable+VAR_MOMENTUM+0);
        h_ff_momentum.y = *(h_ff_variable+VAR_MOMENTUM+1);
        h_ff_momentum.z = *(h_ff_variable+VAR_MOMENTUM+2);
        double3 h_ff_fc_momentum_x;
        double3 h_ff_fc_momentum_y;
        double3 h_ff_fc_momentum_z;
        double3 h_ff_fc_density_energy;
        compute_flux_contribution(h_ff_variable[VAR_DENSITY], h_ff_momentum, h_ff_variable[VAR_DENSITY_ENERGY],
                                  ff_pressure, ff_velocity, h_ff_fc_momentum_x, h_ff_fc_momentum_y,
                                  h_ff_fc_momentum_z, h_ff_fc_density_energy);

        // copy far field conditions to the gpu
        checkCudaErrors( cudaMemcpyToSymbol(ff_variable, h_ff_variable, NVAR*sizeof(double)) );
        checkCudaErrors( cudaMemcpyToSymbol(ff_fc_momentum_x, &h_ff_fc_momentum_x, sizeof(double3)) );
        checkCudaErrors( cudaMemcpyToSymbol(ff_fc_momentum_y, &h_ff_fc_momentum_y, sizeof(double3)) );
        checkCudaErrors( cudaMemcpyToSymbol(ff_fc_momentum_z, &h_ff_fc_momentum_z, sizeof(double3)) );
        checkCudaErrors( cudaMemcpyToSymbol(ff_fc_density_energy, &h_ff_fc_density_energy, sizeof(double3)) );
    }

    int nel;
    int nelr;

    // read in domain geometry
    double* areas;
    int* elements_surrounding_elements;
    double* normals;
    {
        std::ifstream file(data_file_name);

        file >> nel;
        nelr = block_length*((nel / block_length) + std::min(1, nel % block_length));

        double* h_areas = new double[nelr];
        int* h_elements_surrounding_elements = new int[nelr*NNB];
        double* h_normals = new double[nelr*NDIM*NNB];

        // read in data
        for(int i = 0; i < nel; i++)
        {
            file >> h_areas[i];
            for(int j = 0; j < NNB; j++)
            {
                file >> h_elements_surrounding_elements[i + j*nelr];
                if(h_elements_surrounding_elements[i+j*nelr] < 0) h_elements_surrounding_elements[i+j*nelr] = -1;
                h_elements_surrounding_elements[i + j*nelr]--; //it's coming in with Fortran numbering

                for(int k = 0; k < NDIM; k++)
                {
                    file >> h_normals[i + (j + k*NNB)*nelr];
                    h_normals[i + (j + k*NNB)*nelr] = -h_normals[i + (j + k*NNB)*nelr];
                }
            }
        }

        // fill in remaining data
        int last = nel-1;
        for(int i = nel; i < nelr; i++)
        {
            h_areas[i] = h_areas[last];
            for(int j = 0; j < NNB; j++)
            {
                // duplicate the last element
                h_elements_surrounding_elements[i + j*nelr] = h_elements_surrounding_elements[last + j*nelr];
                for(int k = 0; k < NDIM; k++) h_normals[i + (j + k*NNB)*nelr] = h_normals[last + (j + k*NNB)*nelr];
            }
        }

        areas = alloc<double>(nelr);
        upload<double>(areas, h_areas, nelr);

        elements_surrounding_elements = alloc<int>(nelr*NNB);
        upload<int>(elements_surrounding_elements, h_elements_surrounding_elements, nelr*NNB);

        normals = alloc<double>(nelr*NDIM*NNB);
        upload<double>(normals, h_normals, nelr*NDIM*NNB);

        delete[] h_areas;
        delete[] h_elements_surrounding_elements;
        delete[] h_normals;
    }

    // Create arrays and set initial conditions
    double* variables = alloc<double>(nelr*NVAR);
    initialize_variables(nelr, variables);

    double* old_variables = alloc<double>(nelr*NVAR);
    double* fluxes = alloc<double>(nelr*NVAR);
    double* step_factors = alloc<double>(nelr);
    double* fc_momentum_x = alloc<double>(nelr*NDIM);
    double* fc_momentum_y = alloc<double>(nelr*NDIM);
    double* fc_momentum_z = alloc<double>(nelr*NDIM);
    double* fc_density_energy = alloc<double>(nelr*NDIM);

    // make sure all memory is really allocated before we start timing
    initialize_variables(nelr, old_variables);
    initialize_variables(nelr, fluxes);
    cudaMemset( (void*) step_factors, 0, sizeof(double)*nelr );

    // make sure CUDA isn't still doing something before we start timing
    cudaThreadSynchronize();

    // these need to be computed the first time in order to compute time step
    std::cout << "Starting..." << std::endl;

    StopWatchInterface *timer = NULL;
    sdkCreateTimer(&timer);
    sdkStartTimer(&timer);

    // Begin iterations
    for(int i = 0; i < iterations; i++)
    {
        copy<double>(old_variables, variables, nelr*NVAR);

        // for the first iteration we compute the time step
        compute_step_factor(nelr, variables, areas, step_factors);

        for(int j = 0; j < RK; j++)
        {
            compute_flux_contributions(nelr, variables, fc_momentum_x, fc_momentum_y, fc_momentum_z, fc_density_energy);
            compute_flux(nelr, elements_surrounding_elements, normals, variables, fc_momentum_x, fc_momentum_y,
                         fc_momentum_z, fc_density_energy, fluxes);
            time_step(j, nelr, old_variables, variables, step_factors, fluxes);
        }
    }

    cudaThreadSynchronize();
    sdkStopTimer(&timer);

    std::cout << (sdkGetAverageTimerValue(&timer)/1000.0) / iterations << " seconds per iteration" << std::endl;

    std::cout << "Saving solution..." << std::endl;
    dump(variables, nel, nelr);
    std::cout << "Saved solution..." << std::endl;

    std::cout << "Cleaning up..." << std::endl;
    dealloc<double>(areas);
    dealloc<int>(elements_surrounding_elements);
    dealloc<double>(normals);

    dealloc<double>(variables);
    dealloc<double>(old_variables);
    dealloc<double>(fluxes);
    dealloc<double>(step_factors);
    dealloc<double>(fc_momentum_x);
    dealloc<double>(fc_momentum_y);
    dealloc<double>(fc_momentum_z);
    dealloc<double>(fc_density_energy);

    std::cout << "Done..." << std::endl;

    return 0;
}
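Editor's sketch (hypothetical `demo_*` names, not part of either file): the .hip and .cu versions above differ only by the mechanical renames hipify applies, e.g. cudaMalloc -> hipMalloc, cudaGetErrorString -> hipGetErrorString, and kernel<<<grid, block>>>(...) -> hipLaunchKernelGGL(kernel, grid, block, 0, 0, ...). A minimal checked-allocation helper in the CUDA form, for comparison against the hipified wrappers:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// CUDA spelling; hipify would rewrite the three cuda* identifiers below to hip*.
static void* demo_alloc(size_t bytes)
{
    void* p = nullptr;
    cudaError_t err = cudaMalloc(&p, bytes);
    if (err != cudaSuccess)
    {
        std::fprintf(stderr, "GPUassert: %s\n", cudaGetErrorString(err));
        std::exit(-1);
    }
    return p;
}

int main()
{
    double* d = static_cast<double*>(demo_alloc(128 * sizeof(double)));
    cudaFree(d);
    return 0;
}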
5351db2db2f730d26df0b0cc30928cee187cb553.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains the nvgraph generalized implementation of Duane Merrill's CUB CSRMV using MergePath */

#include "nvgraph_csrmv.hxx"
#include "exclusive_kv_scan.hxx" // atomics are included in semiring
#include "semiring.hxx"
#include "nvgraph_error.hxx"

// IMPORTANT: IndexType_ must be a signed integer (int, long, long long, etc.). Unsigned int is not
// supported, since -1 is used as a flag value.
namespace nvgraph
{

// Calculates the SM version to be used (could be moved to a .cpp host file)
__forceinline__ hipError_t SmVersion(int &smVersion, int deviceOrdinal)
{
    hipError_t error = hipSuccess; // assume success and report otherwise if a condition fails
    do
    {
        // Find out SM version
        int major, minor;
        if (error = hipDeviceGetAttribute(&major, hipDeviceAttributeComputeCapabilityMajor, deviceOrdinal)) break;
        if (error = hipDeviceGetAttribute(&minor, hipDeviceAttributeComputeCapabilityMinor, deviceOrdinal)) break;
        smVersion = 100 * major + 10 * minor;
    } while(0);
    return error;
}

template<
    int _BLOCK_THREADS,    // number of threads per thread block
    int _ITEMS_PER_THREAD> // number of items per individual thread
struct SpmvBlockThread // this lives in the agent file; other template parameters are ignored for now
{
    // set constants
    enum
    {
        BLOCK_THREADS = _BLOCK_THREADS,       // number of threads per thread block
        ITEMS_PER_THREAD = _ITEMS_PER_THREAD, // number of items per thread per tile (tid) of input
    };
};

// This function calculates the MergePath (load balancing) for each thread by doing a binary search
// along the cross diagonal
template<typename IndexType_>
__device__ __forceinline__ void MergePathSearch(
    IndexType_ diag,
    IndexType_ *A,      // rowOffsets + 1
    IndexType_ offset,  // starting value of the counter array
    IndexType_ A_length,
    IndexType_ B_length,
    Coord<IndexType_> &pathCoord) // returned by reference; stores the path
{
    IndexType_ splitMin = max(diag - B_length, IndexType_(0)); // must be nonnegative
    IndexType_ splitMax = min(diag, A_length);                 // stay in bounds

    // do binary search along the diagonal
    while (splitMin < splitMax)
    {
        // take the average (integer division): start in the middle so we can go up or down the diagonal
        IndexType_ splitPivot = (splitMin + splitMax) / 2;
        if (A[splitPivot] <= diag - splitPivot - 1 + offset) // i + j = diag - 1 along the cross diagonal (B is implicit)
        {
            // move up A and down B, from (i,j) to (i-1,j+1); the pivot is clearly before the split point
            splitMin = splitPivot + 1;
        }
        else
        {
            // move down A and up B
            splitMax = splitPivot;
        }
    }

    // transform back to array coordinates from cross-diagonal coordinates
    pathCoord.x = min(splitMin, A_length); // make sure we do not go out of bounds
    // constraint: i + j = k
    pathCoord.y = diag - splitMin;
}
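// Editor's illustration (hypothetical host-side mirror, not part of the original nvgraph code):
// the same diagonal binary search with a worked example. A is the CSR row pointer without its
// leading 0 and B is the implicit counting sequence of nonzero indices. For csrRowPtr = {0,2,2,5}
// (row lengths 2, 0, 3), A = {2,2,5} and B = 0..4; splitting at diag = 4 merges B0, B1, A0, A1,
// so the returned coordinate is (x, y) = (2, 2): two row boundaries and two nonzeros consumed.
template<typename IndexType_>
inline void MergePathSearchHostDemo(IndexType_ diag, const IndexType_ *A, IndexType_ offset,
                                    IndexType_ A_length, IndexType_ B_length,
                                    Coord<IndexType_> &pathCoord)
{
    IndexType_ splitMin = (diag - B_length > IndexType_(0)) ? diag - B_length : IndexType_(0);
    IndexType_ splitMax = (diag < A_length) ? diag : A_length;
    while (splitMin < splitMax)
    {
        IndexType_ splitPivot = (splitMin + splitMax) / 2;
        if (A[splitPivot] <= diag - splitPivot - 1 + offset) // B[diag - splitPivot - 1] for a counting B
            splitMin = splitPivot + 1; // the row boundary merges first: consume more of A
        else
            splitMax = splitPivot;     // the nonzero merges first: consume more of B
    }
    pathCoord.x = (splitMin < A_length) ? splitMin : A_length;
    pathCoord.y = diag - splitMin;
}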
// Spmv search kernel that calls MergePath and identifies the merge path starting coordinates for each tile
template <typename SpmvBlockThread, typename IndexType_, typename ValueType_>
__global__ void DeviceSpmvSearchKernel( // calls the device function MergePathSearch
    int numMergeTiles,              // [input] number of spmv merge tiles, which is the spmv grid size
    Coord<IndexType_> *dTileCoords, // [output] pointer to a temporary array of tile starting coordinates
    CsrMvParams<IndexType_, ValueType_> spParams) // [input] spmv input parameters with the corresponding arrays
{
    // set the constants for the gpu architecture
    enum
    {
        BLOCK_THREADS = SpmvBlockThread::BLOCK_THREADS,
        ITEMS_PER_THREAD = SpmvBlockThread::ITEMS_PER_THREAD,
        TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD,
    };

    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid <= numMergeTiles) // verify within domain
    {
        IndexType_ diag = tid * TILE_ITEMS;
        Coord<IndexType_> tileCoord; // each tid will compute its own tile coordinate,
        // which is stored in tileCoord, passed by reference
        // the input row pointer starts at csrRowPtr[1]: the merge path ignores the 0 entry
        // the first argument of the counting sequence is its size (nnz); the second is where to start counting
        IndexType_ countStart = 0; // if the row pointer is 1-based, make sure the count starts at 1 instead of 0
        MergePathSearch(diag, spParams.csrRowPtr, countStart, spParams.m, spParams.nnz, tileCoord);
        // store the path of this thread in the array of coordinates
        dTileCoords[tid] = tileCoord; // stores the (y,x) = (i,j) coordinate computed by this thread
    }
}

// Agent struct with two main inline functions which compute the spmv
template<
    typename SpmvPolicyT,   // parameterized SpmvBlockThread tuning policy type as listed above
    typename IndexType_,    // index type of rowOffsets and colIndices
    typename ValueType_,    // matrix and vector value type
    typename SemiRingType_, // one of the semiring structs, selected depending on the enum
    bool hasAlpha,          // signifies whether the input parameter alpha is 1 in y = alpha*A*x + beta*y
    bool hasBeta>           // signifies whether the input parameter beta is 0
struct AgentSpmv
{
    // set constants
    enum
    {
        BLOCK_THREADS = SpmvPolicyT::BLOCK_THREADS,
        ITEMS_PER_THREAD = SpmvPolicyT::ITEMS_PER_THREAD,
        TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD,
    };

    // we use the key-value pair return type for scanning, where each pair is an accumulated
    // segment value together with its segment index
    __device__ __forceinline__ KeyValuePair<IndexType_, ValueType_> consumeTile(
        Coord<IndexType_> tileStartCoord, // the starting coordinate, determined by the initial MergePath call
        Coord<IndexType_> tileEndCoord,
        CsrMvParams<IndexType_, ValueType_> &spParams,
        SemiRingType_ SR) // the semiring struct with the scalar operations
    {
        // length(rowOffsets) = numRows + 1; the merge path ignores the first element, so the length of the
        // path in the x direction gives the exact number of rows
        IndexType_ tileNumRows = tileEndCoord.x - tileStartCoord.x;
        // the number of nonzeros goes down the path; the counting iterator indexes the colInd and val
        // arrays, which are of size nnz
        IndexType_ tileNnz = tileEndCoord.y - tileStartCoord.y;

        // load row offsets into shared memory - create a shared-memory row offset pointer
        __shared__ IndexType_ smemTileRowPtr[ITEMS_PER_THREAD + TILE_ITEMS + 1];

        // copy row offsets into shared memory for accumulating matrix-vector dot products along the merge path
        for (int item = threadIdx.x; item <= tileNumRows; item += BLOCK_THREADS) // stride by BLOCK_THREADS, the number of threads per block
        {
            // start with rowOffsets at the start coordinate for the corresponding thread id
            // (a cache wrapper could be added for efficiency later)
            if ((tileStartCoord.x + item) < spParams.m) // memory protection: already shifted by +1, so only go up to m
            {
                smemTileRowPtr[item] = spParams.csrRowPtr[tileStartCoord.x + item];
            }
        }

        // after loading into shared memory we must sync the threads to make sure all have completed
        __syncthreads();

        Coord<IndexType_> threadStartCoord;

        // call MergePathSearch again, on shared memory, using the start indices
        IndexType_ diag = threadIdx.x * ITEMS_PER_THREAD; // compute diagonal
        // the shared-memory row pointer has been re-indexed down to 0, so the count offset can start at 0 too;
        // the counting iterator starts at the current y position
        IndexType_ countIndId = tileStartCoord.y;
        MergePathSearch(diag,
                        smemTileRowPtr, // sorted list A = row offsets in shared memory
                        countIndId,     // sorted list B = consecutive counting indices; this is the starting index
                        tileNumRows,
                        tileNnz,
                        threadStartCoord); // the resulting path is stored in threadStartCoord
        __syncthreads(); // make sure every thread has completed its diagonal of the merge path

        // Compute this thread's merge-path segment to perform the dot product, going down the merge path
        // in the loop below
        Coord<IndexType_> threadCurrentCoord = threadStartCoord;
        KeyValuePair<IndexType_, ValueType_> scanSegment[ITEMS_PER_THREAD]; // static array of key-value pairs

        // initialize each dot product contribution to the identity
        ValueType_ totalValue;
        SR.setPlus_ident(totalValue); // initialize to the semiring identity of the plus operation

        #pragma unroll // unroll the for loop for efficiency
        for (int item = 0; item < ITEMS_PER_THREAD; ++item) // loop over the items belonging to this thread along the merge path
        {
            // go down the merge path and sum; when we move right, a new component of the result vector y begins.
            // countInd is the consecutive natural-number list B going down the matrix, so it is indexed by y,
            // whereas rowOffset governs the moves to the right and is indexed by x
            countIndId = threadCurrentCoord.y + tileStartCoord.y;
            IndexType_ nnzId = min(countIndId, spParams.nnz - 1); // make sure we stay in bounds
            IndexType_ colIdx = spParams.csrColInd[nnzId];
            ValueType_ A_val = spParams.csrVal[nnzId]; // A value; we assume A and x are of the same datatype
            // recall the standard algorithm: y[row] += val[nnz] * x[colInd[nnz]] in traditional sparse
            // matrix-vector form
            ValueType_ x_val = spParams.x[colIdx]; // a wrapper of the x vector could change depending on the architecture
            // the counter tells us which direction to move, right or down, since the last entry of
            // rowOffsets is the total number of nonzeros
            if (countIndId < smemTileRowPtr[threadCurrentCoord.x]) // i.e. still within the nonzeros of this row
            {
                // move down the current row, accumulating the matrix and vector dot product
                // (SR.plus/SR.times are kept abstract because the semiring may be (min, +) rather than (+, *))
                totalValue = SR.plus(SR.times(A_val, x_val), totalValue);
                // store in a key-value pair
                scanSegment[item].key = tileNumRows;
                scanSegment[item].value = totalValue;
                ++threadCurrentCoord.y;
            }
            else // move right to a new row and reset
            {
                scanSegment[item].key = threadCurrentCoord.x;
                scanSegment[item].value = totalValue; // store the current value without adding
                SR.setPlus_ident(totalValue); // reset to the identity for the new row //0.0;//SR.times_null;
                ++threadCurrentCoord.x;
            }
        }
        __syncthreads();

        // now each thread block has its matrix-vector products and we must do a block-wide reduction:
        // a block-wide reduce-value-by-segment
        KeyValuePair<IndexType_, ValueType_> scanItem, tileCarry; // tileCarry is the key-value pair that we will be returning
        scanItem.key = threadCurrentCoord.x; // another variant takes the min with tileNumRows here
        scanItem.value = totalValue;
        PrefixSum<IndexType_, ValueType_, SemiRingType_, BLOCK_THREADS>(SR).ExclusiveKeyValueScan(scanItem, tileCarry);

        if (tileNumRows > 0)
        {
            if (threadIdx.x == 0) scanItem.key = -1; // can be negative; important that it is int rather than unsigned int

            // do a direct scatter
            #pragma unroll
            for (int item = 0; item < ITEMS_PER_THREAD; ++item)
            {
                if (scanSegment[item].key < tileNumRows) // scanSegment is an array of key-value pairs
                {
                    if (scanItem.key == scanSegment[item].key)
                    {
                        scanSegment[item].value = SR.plus(scanItem.value, scanSegment[item].value);
                    }

                    if (hasAlpha)
                    {
                        // boolean set to 1: A*x needs to be multiplied by alpha as stored in spParams
                        scanSegment[item].value = SR.times(spParams.alpha, scanSegment[item].value);
                    }

                    // check hasBeta: then y on the right-hand side is multiplied by beta
                    if (hasBeta)
                    {
                        // y = alpha*A*x + beta*y
                        // the current x coordinate is stored in the key, which gives the desired row entry in y
                        ValueType_ y_val = spParams.y[tileStartCoord.x + scanSegment[item].key];
                        scanSegment[item].value = SR.plus(SR.times(spParams.beta, y_val), scanSegment[item].value);
                    }

                    // Set the output vector row element
                    spParams.y[tileStartCoord.x + scanSegment[item].key] = scanSegment[item].value; // disjoint keys
                }
            }
        }

        // Return the tile's running carry-out key-value pair
        return tileCarry; // comes from the exclusive scan
    }

    // overloaded consumeTile function for the interface; this one is called by the dispatch function
    __device__ __forceinline__ void consumeTile(
        Coord<IndexType_> *dTileCoords, // pointer to the temporary array of tile starting coordinates
        IndexType_ *dTileCarryKeys,     // [output] pointer to the temporary array of carry-out dot-product row-ids, one per block
        ValueType_ *dTileCarryValues,   // [output] pointer to the temporary array of carry-out dot-product values, one per block
        int numMergeTiles,              // number of merge tiles
        CsrMvParams<IndexType_, ValueType_> spParams,
        SemiRingType_ SR)
    {
        int tid = (blockIdx.x * gridDim.y) + blockIdx.y; // current tile index
        // only continue if tid is in the proper range
        if (tid >= numMergeTiles) return;

        Coord<IndexType_> tileStartCoord = dTileCoords[tid]; // +0 ignored
        Coord<IndexType_> tileEndCoord = dTileCoords[tid + 1];

        // Consume a multi-segment tile by calling the consumeTile overload above
        KeyValuePair<IndexType_, ValueType_> tileCarry = consumeTile(tileStartCoord, tileEndCoord, spParams, SR);

        // output the tile's carry-out
        if (threadIdx.x == 0)
        {
            if (hasAlpha) tileCarry.value = SR.times(spParams.alpha, tileCarry.value);
            tileCarry.key += tileStartCoord.x;
            if (tileCarry.key < spParams.m)
            {
                dTileCarryKeys[tid] = tileCarry.key;
                dTileCarryValues[tid] = tileCarry.value;
            }
            else
            {
                // Make sure to reject keys larger than the matrix size directly here.
                // printf("%d %lf\n", tileCarry.key, tileCarry.value);
                // this patch may be obsolete after the changes related to bug#1754610
                dTileCarryKeys[tid] = -1;
            }
        }
    }
};

// this device kernel will call the above agent function (ignoring tuning policies for now)
template<
    typename SpmvBlockThread, // parameterized spmv tuning policy type
    typename IndexType_,      // index type, either a 32-bit or 64-bit integer, for rowOffsets and colIndices
    typename ValueType_,      // matrix and vector value type
    typename SemiRingType_,   // one of the semiring structs, selected depending on the enum
    bool hasAlpha,            // determines whether alpha = 1, as above
    bool hasBeta>             // determines whether beta = 0, as above
__global__ void DeviceSpmvKernel( // this will call consumeTile
    CsrMvParams<IndexType_, ValueType_> spParams, // pass a constant reference to the spmv parameters
    const SemiRingType_ &SR,
    Coord<IndexType_> *dTileCoords, // [input] pointer to the temporary array of tile starting coordinates; each is an (y,x) = (i,j) pair on the merge path
    IndexType_ *dTileCarryKeys,     // [output] pointer to the temporary array that carries out the dot-product row-ids, one per block
    ValueType_ *dTileCarryValues,   // [output] pointer to the temporary array that carries out the dot-product values, one per block
    int numTiles                    // [input] the number of merge tiles
    )
{
    // call the Spmv agent type specialization - need to fix this call!!
    // call the constructor to initialize, then consumeTile to calculate the row dot products
    AgentSpmv<SpmvBlockThread, IndexType_, ValueType_, SemiRingType_, hasAlpha, hasBeta>().consumeTile(
        dTileCoords, dTileCarryKeys, dTileCarryValues, numTiles, spParams, SR);
}
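// Editor's illustration (hypothetical helper, not used by the dispatch path): the fixup step that
// the next section implements on the device, written as plain sequential host code. Each spmv tile
// leaves one carry-out (row, partialSum) pair; runs of equal row keys are folded together and one
// sum per run is added into y. E.g. keys {4,4,7} with values {0.5,1.5,2.0} add 2.0 into y[4] and
// 2.0 into y[7]. Assumes the (+, *) semiring and only valid keys (the device code writes -1 for
// rejected carries and skips them).
template<typename IndexType_, typename ValueType_>
inline void FixupCarryOutsHostDemo(const IndexType_ *keys, const ValueType_ *values,
                                   int numCarryOuts, ValueType_ *y)
{
    if (numCarryOuts <= 0) return;
    ValueType_ run = values[0];
    for (int k = 1; k < numCarryOuts; ++k)
    {
        if (keys[k] != keys[k-1]) { y[keys[k-1]] += run; run = values[k]; } // flush the finished run
        else                      { run += values[k]; }                     // same row: keep accumulating
    }
    y[keys[numCarryOuts-1]] += run; // flush the final run
}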
blockItrValues[offset] : outOfBoundsDefault.value; } } //load linear segment of items into a blocked arangement across a thread block template<int ITEMS_PER_THREAD, typename IndexType_, typename ValueType_> __device__ __forceinline__ void loadDirectBlocked( int linearTid, IndexType_ * blockItrKeys, ValueType_ * blockItrValues, KeyValuePair<IndexType_,ValueType_> (&items)[ITEMS_PER_THREAD]) { //Load directly in thread-blocked order #pragma unroll for (int item = 0; item < ITEMS_PER_THREAD; ++item) { items[item].key = blockItrKeys[(linearTid *ITEMS_PER_THREAD) + item]; items[item].value = blockItrValues[(linearTid *ITEMS_PER_THREAD) + item]; } } //This part pertains to the fixup kernel which does a device-wide reduce-value-by-key //for the thread blocks template< typename SpmvPolicyT, // parameterized SpmvBlockThread tuning policy type as listed above typename IndexType_, typename ValueType_, typename SemiRingType_> //matrix and vector value type struct AgentSegmentReduction { //set constants enum { BLOCK_THREADS = SpmvPolicyT::BLOCK_THREADS, ITEMS_PER_THREAD = SpmvPolicyT::ITEMS_PER_THREAD, TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, }; //This function processes an input tile and uses an atomic rewrite strategy template<bool isLastTile> __device__ __forceinline__ void consumeTilePost( IndexType_ *dInKeys, //input array of key value pairs ValueType_ *dInValues, //input array of key value pairs ValueType_ *dAggregatesOut, //output value aggregates into final array y IndexType_ numRemaining, //Number of global input items remaining including this tile IndexType_ tileOffset, //Tile offset SemiRingType_ SR ) { KeyValuePair<IndexType_,ValueType_> pairs[ITEMS_PER_THREAD]; KeyValuePair<IndexType_, ValueType_> outOfBoundsPair; outOfBoundsPair.key = -1; //default value to assign to out of bounds items is set to be -1 int linearTid = threadIdx.x; //load the values into pairs if (isLastTile) { loadDirectBlocked<ITEMS_PER_THREAD, IndexType_, ValueType_> (linearTid, dInKeys + tileOffset, dInValues + tileOffset, pairs, numRemaining, outOfBoundsPair); } else { loadDirectBlocked<ITEMS_PER_THREAD, IndexType_, ValueType_> (linearTid, dInKeys + tileOffset, dInValues + tileOffset, pairs); } #pragma unroll for (int item = 1; item < ITEMS_PER_THREAD; ++item) { ValueType_ *dScatter = dAggregatesOut + pairs[item-1].key; //write to correct row using the key if (pairs[item].key != pairs[item-1].key) { SR.atomicPlus(dScatter, pairs[item -1].value); } else pairs[item].value = SR.plus(pairs[item -1].value, pairs[item].value); //the operation is SUm } // Write out last item if it is valid by checking last key boolean. // pairs[ITEMS_PER_THREAD - 1].key = -1 for out bound elements. 
ValueType_ *dScatter = dAggregatesOut + pairs[ITEMS_PER_THREAD - 1].key; if ((!isLastTile || pairs[ITEMS_PER_THREAD - 1].key >= 0)) { //printf("hello %d %lf\n", pairs[ITEMS_PER_THREAD - 1].key , pairs[ITEMS_PER_THREAD -1].value); SR.atomicPlus(dScatter, pairs[ITEMS_PER_THREAD -1].value); } } //this function will call consumeTilePost and it scans the tiles of items as a part of a dynamic chained scan __device__ __forceinline__ void consumeRange( IndexType_ *dKeysIn, //input array of key value pairs ValueType_ *dValuesIn, //input array of key value pairs ValueType_ *dAggregatesOut, //output value aggregates into final array y int numItems, //totall number of input items int numTiles, //total number of input tiles SemiRingType_ SR) { //Blocks are launched in increasing order, so we assign one tile per block int tileIdx = (blockIdx.x * gridDim.y) + blockIdx.y; //current tile index same as in consumeTile IndexType_ tileOffset = tileIdx * TILE_ITEMS; //Global offset for the current tile IndexType_ numRemaining = numItems - tileOffset; //Remaining items which includes this tile if (numRemaining > TILE_ITEMS) //this is not the last tile so call wit template argument set to be false consumeTilePost<false>(dKeysIn, dValuesIn, dAggregatesOut, numRemaining,tileOffset, SR); else if (numRemaining > 0) //this is the last tile which could be possibly partially full consumeTilePost<true>(dKeysIn, dValuesIn, dAggregatesOut, numRemaining,tileOffset, SR); } }; //Blockwide reduction by key final kernel template < typename SpmvBlockThreadSegment, //parameterized spmvpolicy tuning policy type typename IndexType_, typename ValueType_, typename SemiRingType_> __global__ void DeviceSegmentReductionByKeyKernel( //this will call consume tile IndexType_ *dKeysIn, //input pointer to the arry of dot product carried out by row-ids, one per spmv block ValueType_ *dValuesIn, //input pointer to the arry of dot product carried out by row-ids, one per spmv block ValueType_ *dAggregatesOut, //output value aggregates - will be y-final output of method IndexType_ numItems, // total number of items to select int numTiles, //total number of tiles for the entire problem SemiRingType_ SR) { //now call cosntructor to initialize and consumeTile to calculate the row dot products AgentSegmentReduction<SpmvBlockThreadSegment, IndexType_, ValueType_, SemiRingType_>().consumeRange( dKeysIn, dValuesIn, dAggregatesOut, numItems, numTiles, SR); } template<typename IndexType_, typename ValueType_, typename SemiRingType_, bool hasAlpha, bool hasBeta> //matrix and vector value type //this is setting all the grid parameters and size struct DispatchSpmv { //declare constants enum { INIT_KERNEL_THREADS = 128 }; //sample tuning polic- can add more later //SM30 struct Policy350 //as a sample there are many other policies to follow { typedef SpmvBlockThread< (sizeof(ValueType_) > 4) ? 96 : 128, //for double use 96 threads per block otherwise 128 (sizeof(ValueType_) > 4) ? 
4 : 4 //4 items per thread for both double and float
        > SpmvPolicyT; //use instead of PtxPolicy; come back and use cusparse to determine the architecture
    };

    struct Policy350Reduction //as a sample; there are many other policies to follow
    {
        typedef SpmvBlockThread<128,3> SpmvPolicyT; //use instead of PtxPolicy; come back and use cusparse to determine the architecture
    }; //for <128,1>, i.e. 1 item per thread, a reduction by key is needed

    __forceinline__ static hipError_t Dispatch(CsrMvParams<IndexType_,ValueType_> spParams, const SemiRingType_ &SR, hipStream_t stream = 0)
    {
        hipError_t error = hipSuccess;
        //could move this block to an init-kernel function
        int blockThreads = Policy350::SpmvPolicyT::BLOCK_THREADS;
        int itemsPerThread = Policy350::SpmvPolicyT::ITEMS_PER_THREAD;
        int blockThreadsRed = Policy350Reduction::SpmvPolicyT::BLOCK_THREADS;
        int itemsPerThreadRed = Policy350Reduction::SpmvPolicyT::ITEMS_PER_THREAD;
        //calculate the total number of spmv work items
        do { //do-while loop; condition is at the end of the loop
            //Get device ordinal
            int deviceOrdinal, smVersion, smCount, maxDimx;
            if (error = hipGetDevice(&deviceOrdinal)) break;
            //Get device SM version
            if (error = SmVersion(smVersion, deviceOrdinal)) break;
            //Get SM count; hipDeviceGetAttribute is a built-in runtime function
            if (error = hipDeviceGetAttribute(&smCount, hipDeviceAttributeMultiprocessorCount, deviceOrdinal)) break;
            //Get the max dimension of the grid in the x direction
            if (error = hipDeviceGetAttribute(&maxDimx, hipDeviceAttributeMaxGridDimX, deviceOrdinal)) break;

            int numMergeItems = spParams.m + spParams.nnz; //total amount of work for one diagonal/thread
            //Tile sizes of the relevant kernels
            int mergeTileSize = blockThreads * itemsPerThread; //for floats this will be a larger number,
            //and since we divide by it, less memory is allocated in the float case
            int segmentRedTileSize = blockThreadsRed * itemsPerThreadRed;
            //Calculate the number of tiles for the kernels;
            //need unsigned int to prevent underflow/overflow
            unsigned int numMergeTiles = (numMergeItems + mergeTileSize - 1) / mergeTileSize; //launch thread number
            unsigned int numSegmentRedTiles = (numMergeTiles + segmentRedTileSize - 1) / segmentRedTileSize;
            //int spmv_sm_occupancy: ignore the maxSmOccupancy function for now and the corresponding segment fixup
            //get grid dimensions using the CUDA built-in datatype dim3, which has a constructor taking the 3 arguments
            dim3 spmvGridSize(min(numMergeTiles, (unsigned int) maxDimx),
                              (numMergeTiles + maxDimx - 1) / maxDimx, //make sure at least 1
                              1); //2D grid
            //grid for the second kernel
            dim3 segmentRedGridSize(min(numSegmentRedTiles, (unsigned int) maxDimx),
                                    (numSegmentRedTiles + maxDimx - 1) / maxDimx,
                                    1);
            Vector<Coord<IndexType_> > dTileCoords(numMergeTiles + 1, stream);
            Vector<IndexType_> dTileCarryKeys(numMergeTiles, stream);
            Vector<ValueType_> dTileCarryValues(numMergeTiles, stream);
            //Get search grid dimensions
            int searchBlockSize = INIT_KERNEL_THREADS;
            int searchGridSize = (numMergeTiles + searchBlockSize) / searchBlockSize; //ignored the +1 -1
            //call the search kernel from the host, so the <<< >>> launch syntax is needed
            //call the device search kernel to compute the starting coordinates of the merge path
            hipLaunchKernelGGL(( DeviceSpmvSearchKernel<typename Policy350::SpmvPolicyT, IndexType_, ValueType_>) ,
                dim3(searchGridSize), dim3(searchBlockSize), 0, stream ,
                numMergeTiles, dTileCoords.raw(), spParams);
            cudaCheckError();
            //this gives the starting coordinates to be consumed by DeviceSpmvKernel
            hipLaunchKernelGGL(( DeviceSpmvKernel<typename Policy350::SpmvPolicyT, IndexType_,ValueType_, SemiRingType_, hasAlpha, hasBeta>) ,
dim3(spmvGridSize), dim3(blockThreads), 0, stream, spParams, SR, dTileCoords.raw(), dTileCarryKeys.raw(), dTileCarryValues.raw(), numMergeTiles); cudaCheckError(); //Run reduce by key kernel if necessary //if (error = hipPeekAtLastError()) break; //check for failure to launch if (numMergeTiles > 1) { hipLaunchKernelGGL(( DeviceSegmentReductionByKeyKernel<typename Policy350Reduction::SpmvPolicyT, IndexType_, ValueType_, SemiRingType_>) , dim3(segmentRedGridSize), dim3(blockThreadsRed), 0, 0, dTileCarryKeys.raw(), dTileCarryValues.raw(), spParams.y, numMergeTiles, numSegmentRedTiles, SR); cudaCheckError(); //if (error = hipPeekAtLastError()) break; //check for failure to launch of fixup kernel } } while(0); //make sure executes exactly once to give chance to break earlier with errors cudaCheckError(); return error; } }; template<typename IndexType_, typename ValueType_, typename SemiRingType_> hipError_t callDispatchSpmv(CsrMvParams<IndexType_, ValueType_> &spParams, const SemiRingType_ &SR, hipStream_t stream = 0) { hipError_t error; //determine semiring type if (spParams.beta == SR.times_null) { if (spParams.alpha == SR.times_ident) //simply y = A*x error = DispatchSpmv<IndexType_, ValueType_, SemiRingType_, false, false>::Dispatch(spParams, SR, stream); //must be on the device else error = DispatchSpmv<IndexType_, ValueType_,SemiRingType_, true, false>::Dispatch(spParams, SR, stream); //must be passed by reference to some since writing } else { if (spParams.alpha == SR.times_ident) error = DispatchSpmv<IndexType_, ValueType_, SemiRingType_, false, true>::Dispatch(spParams, SR, stream); else error = DispatchSpmv<IndexType_, ValueType_, SemiRingType_, true, true>::Dispatch(spParams, SR, stream); } return error; } template<typename IndexType_, typename ValueType_> hipError_t callSemiringSpmv(CsrMvParams<IndexType_, ValueType_> &spParams, Semiring SR, hipStream_t stream = 0) { // This is dangerous but we need to initialize this value, probably it's // better to return success than to return some misleading error code hipError_t error = hipSuccess; switch(SR) { case PlusTimes: { PlusTimesSemiring<ValueType_> plustimes; //can be float or double for real case error = callDispatchSpmv(spParams, plustimes, stream); } break; case MinPlus: { MinPlusSemiring<ValueType_> minplus; error = callDispatchSpmv(spParams, minplus, stream); } break; case MaxMin: { MaxMinSemiring<ValueType_> maxmin; error = callDispatchSpmv(spParams, maxmin, stream); } break; case OrAndBool: { OrAndBoolSemiring<ValueType_> orandbool; error = callDispatchSpmv(spParams, orandbool, stream); } break; case LogPlus: { LogPlusSemiring<ValueType_> logplus; error = callDispatchSpmv(spParams, logplus, stream); } break; } return error; } //create a device function interface to call the above dispatch function template <typename IndexType_, typename ValueType_> hipError_t csrmv_mp( IndexType_ n, IndexType_ m, IndexType_ nnz, ValueType_ alpha, ValueType_ * dValues, //all must be preallocated on the device IndexType_ * dRowOffsets, IndexType_ * dColIndices, ValueType_ *dVectorX, ValueType_ beta, ValueType_ *dVectorY, Semiring SR, hipStream_t stream) { //create user interface //calling device kernel depends on tempalte boolean parameters fro alpha/beta //Set parameters for struct CsrMvParams<IndexType_, ValueType_> spParams; spParams.m = m; spParams.n = n; spParams.nnz = nnz; spParams.alpha = alpha; spParams.beta = beta; spParams.csrRowPtr = dRowOffsets + 1; //ignore first 0 component in merge path specific for this spmv only spParams.csrVal 
= dValues; spParams.csrColInd = dColIndices; spParams.x = dVectorX; spParams.y = dVectorY; return callSemiringSpmv(spParams, SR, stream); } template<typename IndexType_, typename ValueType_> hipError_t csrmv_mp( IndexType_ n, IndexType_ m, IndexType_ nnz, ValueType_ alpha, ValuedCsrGraph <IndexType_, ValueType_> network, ValueType_ *dVectorX, ValueType_ beta, ValueType_ *dVectorY, Semiring SR, hipStream_t stream ) { //calling device kernel depends on tempalte boolean parameters fro alpha/beta //Set parameters for struct CsrMvParams<IndexType_, ValueType_> spParams; spParams.m = m; spParams.n = n; spParams.nnz = nnz; spParams.alpha = alpha; spParams.beta = beta; spParams.csrRowPtr = network.get_raw_row_offsets() + 1; //ignore first 0 component in merge path specific for this spmv only spParams.csrVal = network.get_raw_values(); spParams.csrColInd = network.get_raw_column_indices(); spParams.x = dVectorX; spParams.y = dVectorY; return callSemiringSpmv(spParams, SR, stream); } //declare template types to be called template hipError_t csrmv_mp<int, double>( int n, int m, int nnz, double alpha, double * dValues, //all must be preallocated on the device int * dRowOffsets, int * dColIndices, double *dVectorX, double beta, double *dVectorY, Semiring SR, hipStream_t stream ); template hipError_t csrmv_mp<long long, double>( long long n, long long m, long long nnz, double alpha, double * dValues, //all must be preallocated on the device long long * dRowOffsets, long long * dColIndices, double *dVectorX, double beta, double *dVectorY, Semiring SR, hipStream_t stream ); template hipError_t csrmv_mp<int, float>( int n, int m, int nnz, float alpha, float * dValues, //all must be preallocated on the device int * dRowOffsets, int * dColIndices, float *dVectorX, float beta, float *dVectorY, Semiring SR, hipStream_t stream ); //for 64 bit support which may not be needed template hipError_t csrmv_mp<long long, float>( long long n, long long m, long long nnz, float alpha, float * dValues, //all must be preallocated on the device long long * dRowOffsets, long long * dColIndices, float *dVectorX, float beta, float *dVectorY, Semiring SR, hipStream_t stream ); //assume embedding booleans in the reals /*template hipError_t csrmv_mp<int, bool>( int n, int m, int nnz, bool alpha, bool * dValues, //all must be preallocated on the device int * dRowOffsets, int * dColIndices, bool *dVectorX, bool beta, bool *dVectorY, Semiring SR ); //for 64 bit support which may not be needed template hipError_t csrmv_mp<long long, bool>( long long n, long long m, long long nnz, bool alpha, bool * dValues, //all must be preallocated on the device long long * dRowOffsets, long long * dColIndices, bool *dVectorX, bool beta, bool *dVectorY, Semiring SR );*/ //declare template types to be called using valued_csr_graph version template hipError_t csrmv_mp<int, double>( int n, int m, int nnz, double alpha, ValuedCsrGraph <int, double> network, double *dVectorX, double beta, double *dVectorY, Semiring SR, hipStream_t stream ); template hipError_t csrmv_mp<long long, double>( long long n, long long m, long long nnz, double alpha, ValuedCsrGraph <long long, double> network, double *dVectorX, double beta, double *dVectorY, Semiring SR, hipStream_t stream ); template hipError_t csrmv_mp<int, float>( int n, int m, int nnz, float alpha, ValuedCsrGraph <int, float> network, float *dVectorX, float beta, float *dVectorY, Semiring SR, hipStream_t stream ); //for 64 bit support which may not be needed template hipError_t csrmv_mp<long long, float>( 
long long n, long long m, long long nnz, float alpha, ValuedCsrGraph <long long, float> network, float *dVectorX, float beta, float *dVectorY, Semiring SR, hipStream_t stream ); /*template hipError_t csrmv_mp<int, bool>( int n, int m, int nnz, bool alpha, ValuedCsrGraph <int, bool> network, bool *dVectorX, bool beta, bool *dVectorY, Semiring SR ); //for 64 bit support which may not be needed template hipError_t csrmv_mp<long long, bool>( long long n, long long m, long long nnz, bool alpha, ValuedCsrGraph <long long, bool> network, bool *dVectorX, bool beta, bool *dVectorY, Semiring SR );*/ } //end namespace nvgraph using namespace nvgraph; //this is the standard kernel used to test the semiring operations template<typename IndexType_, typename ValueType_, typename SemiRingType_> __global__ void csrmv(IndexType_ num_rows, IndexType_ *dRowOffsets, IndexType_ *dColIndices, ValueType_ *dValues, ValueType_ *dVectorX, ValueType_ *dVectorY, SemiRingType_ SR, ValueType_ alpha, ValueType_ beta) { int row = blockDim.x * blockIdx.x + threadIdx.x ; if (row < num_rows) { ValueType_ dot; SR.setPlus_ident(dot); //SR.setPlus_ident(dVectorY[row]); //need to initialize y outside IndexType_ row_start = dRowOffsets[row]; IndexType_ row_end = dRowOffsets[row + 1]; for (int i = row_start; i < row_end; i++) { dot = SR.plus(SR.times(alpha,SR.times(dValues[i], dVectorX[dColIndices[i]])), dot); } dVectorY[row] = SR.plus(dot, (SR.times(beta, dVectorY[row]))); } } template<typename IndexType_, typename ValueType_> void callTestCsrmv(IndexType_ num_rows, IndexType_ *dRowOffsets, IndexType_ *dColIndices, ValueType_ *dValues, ValueType_ *dVectorX, ValueType_ *dVectorY, nvgraph::Semiring SR, ValueType_ alpha, ValueType_ beta) { const int side = 2048; const int numThreads = 256; const int numBlocks = (side * side + numThreads - 1) / numThreads; switch(SR) { case nvgraph::PlusTimes: { nvgraph::PlusTimesSemiring<ValueType_> plustimes; //can be float or double for real case hipLaunchKernelGGL(( csrmv), dim3(numBlocks), dim3(numThreads), 0, 0, num_rows, dRowOffsets, dColIndices, dValues, dVectorX, dVectorY, plustimes, alpha, beta); } break; case nvgraph::MinPlus: { nvgraph::MinPlusSemiring<ValueType_> minplus; hipLaunchKernelGGL(( csrmv), dim3(numBlocks), dim3(numThreads), 0, 0, num_rows, dRowOffsets, dColIndices, dValues, dVectorX, dVectorY, minplus, alpha, beta); } break; case nvgraph::MaxMin: { nvgraph::MaxMinSemiring<ValueType_> maxmin; hipLaunchKernelGGL(( csrmv), dim3(numBlocks), dim3(numThreads), 0, 0, num_rows, dRowOffsets, dColIndices, dValues, dVectorX, dVectorY, maxmin, alpha, beta); } break; case nvgraph::OrAndBool: { nvgraph::OrAndBoolSemiring<ValueType_> orandbool; hipLaunchKernelGGL(( csrmv), dim3(numBlocks), dim3(numThreads), 0, 0, num_rows, dRowOffsets, dColIndices, dValues, dVectorX, dVectorY, orandbool, alpha, beta); } break; case nvgraph::LogPlus: { nvgraph::LogPlusSemiring<ValueType_> logplus; hipLaunchKernelGGL(( csrmv), dim3(numBlocks), dim3(numThreads), 0, 0, num_rows, dRowOffsets, dColIndices, dValues, dVectorX, dVectorY, logplus, alpha, beta); } break; } cudaCheckError(); } template void callTestCsrmv<int, float>(int num_rows, int *dRowOffsets, int*dColIndices, float *dValues, float *dVectorX, float *dVectorY, nvgraph::Semiring SR, float alpha, float beta); template void callTestCsrmv<int, double>(int num_rows, int *dRowOffsets, int*dColIndices, double *dValues, double *dVectorX, double *dVectorY, nvgraph::Semiring SR, double alpha, double beta);
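// To make the MergePathSearch logic above concrete, here is a small host-only
// C++ illustration (not part of nvgraph; the helper names and the toy matrix are
// chosen for this example). The merge is between A = csrRowPtr + 1 (row end
// offsets) and B = the counting sequence 0..nnz-1: a unit step in B consumes one
// nonzero and a unit step in A finishes one row, so equal-length stretches of the
// combined path give every tile the same amount of work.
#include <cstdio>
#include <algorithm>

struct HostCoord { int x, y; };

// same binary search along the cross-diagonal as the device MergePathSearch
HostCoord hostMergePathSearch(int diag, const int *A, int offset, int aLen, int bLen)
{
    int lo = std::max(diag - bLen, 0);
    int hi = std::min(diag, aLen);
    while (lo < hi)
    {
        int mid = (lo + hi) / 2;
        if (A[mid] <= diag - mid - 1 + offset)
            lo = mid + 1; // the diagonal crosses below row boundary 'mid'
        else
            hi = mid;     // the diagonal crosses at or above row boundary 'mid'
    }
    return { std::min(lo, aLen), diag - lo };
}

int main()
{
    // csrRowPtr = {0,2,3,5} for rows with 2, 1 and 2 nonzeros; the search sees csrRowPtr + 1
    int rowEnd[3] = {2, 3, 5};
    int m = 3, nnz = 5, tileItems = 4; // total path length m + nnz = 8, so two tiles
    for (int diag = 0; diag <= m + nnz; diag += tileItems)
    {
        HostCoord c = hostMergePathSearch(std::min(diag, m + nnz), rowEnd, 0, m, nnz);
        printf("diagonal %d -> (row %d, nonzero %d)\n", diag, c.x, c.y); // 0->(0,0), 4->(1,3), 8->(3,5)
    }
    return 0;
}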
5351db2db2f730d26df0b0cc30928cee187cb553.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* This file contains the nvgraph generalized implementation of the Duane Merrill's CUB CSRMV using MergePath */ #include "nvgraph_csrmv.hxx" #include "exclusive_kv_scan.hxx" //atomics are included in semiring #include "semiring.hxx" #include "nvgraph_error.hxx" //IMPORTANT: IndexType_ must be a signed integer, long, long long etc. Unsigned int is not supported, since -1 is //used as a flag value namespace nvgraph{ //Calculates SM to be used-add to cpp host file __forceinline__ cudaError_t SmVersion(int &smVersion, int deviceOrdinal) { cudaError_t error = cudaSuccess; //assume sucess and state otherwise if fails condition do { //Find out SM version int major, minor; if (error = cudaDeviceGetAttribute(&major, cudaDevAttrComputeCapabilityMajor, deviceOrdinal)) break; if (error = cudaDeviceGetAttribute(&minor, cudaDevAttrComputeCapabilityMinor, deviceOrdinal)) break; smVersion = 100 * major + 10 * minor; } while(0); return error; } template< int _BLOCK_THREADS, //number of threads per thread block int _ITEMS_PER_THREAD> //number of items per individual thread struct SpmvBlockThread //this is in agent file other template parameters ignoring for now { //set constants enum { BLOCK_THREADS = _BLOCK_THREADS, //number of threads per thread block ITEMS_PER_THREAD = _ITEMS_PER_THREAD, //number of items per thread per tile(tid) of input }; }; //This function calculates the MergePath(load-balancing) for each thread by doing a binary search //along the diagonal template<typename IndexType_> __device__ __forceinline__ void MergePathSearch( IndexType_ diag, IndexType_ *A, //rowoffsets + 1 IndexType_ offset, //counter array IndexType_ A_length, IndexType_ B_length, Coord<IndexType_> &pathCoord) //returned by reference stores the path { IndexType_ splitMin = max(diag - B_length, IndexType_(0)); //must be nonnegative IndexType_ splitMax = min(diag, A_length); //stay in bounds //do binary search along diagonal while (splitMin < splitMax) { IndexType_ splitPivot = (splitMin + splitMax) / 2; //take average integer division-start in middle so can go up or down diagonal if (A[splitPivot] <= diag - splitPivot - 1 + offset) //i+j = diag -1 along cross diag **ignored B //move up A and down B from (i,j) to (i-1,j+1) { splitMin = splitPivot + 1; //increase a in case that it is less clearly before split_min <= split_pivot less than average } else { //move down A and up B splitMax = splitPivot; } } //transform back to array coordinates from cross diagaonl coordinates pathCoord.x = min(splitMin, A_length); //make sure do not go out of bounds; //constraint i + j = k pathCoord.y = diag - splitMin; } //Spmv search kernel that calls merge path and identifies the merge path starting coordinates for each tile template <typename SpmvBlockThread, typename IndexType_, typename ValueType_> __global__ void DeviceSpmvSearchKernel( //calls device function merge path int numMergeTiles, //[input] Number of spmv merge tiles which is the spmv grid size 
Coord<IndexType_> *dTileCoords, //[output] pointer to a temporary array of tile starting coordinates CsrMvParams<IndexType_, ValueType_> spParams) //[input] spmv input parameter with corrdponding needed arrays { //set the constants for the gpu architecture enum { BLOCK_THREADS = SpmvBlockThread::BLOCK_THREADS, ITEMS_PER_THREAD = SpmvBlockThread::ITEMS_PER_THREAD, TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, }; int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid <= numMergeTiles) //verify within domain { IndexType_ diag = tid * TILE_ITEMS; Coord<IndexType_> tileCoord; //each tid will compute its own tile_coordinate //the above coordinate will be stored in tile_coordinate passed by reference //input row pointer starting at csrRowPtr[1] merge path ignores the 0 entry //the first argument to the counting constructor is the size-nnz and the second argument is where to start countings IndexType_ countStart = 0; //if row pointer is 1 based make sure count starts at 1 instead of 0 MergePathSearch(diag, spParams.csrRowPtr, countStart, spParams.m, spParams.nnz, tileCoord); //store path of thread in array of coordinates dTileCoords[tid] = tileCoord; //stores (y,x) = (i.j) coord of thread computed* } } //Agent sturct with two main inline functions which compute the spmv template< typename SpmvPolicyT, // parameterized SpmvBlockThread tuning policy type as listed above typename IndexType_, //index value of rowOffsets and ColIndices typename ValueType_, //matrix and vector value type typename SemiRingType_, //this follows different semiring structs to be passed depending on the enum bool hasAlpha, //signifies whether the input parameter alpha is 1 in y = alpha*A*x + beta*A*y bool hasBeta> //signifies whether the input parameter beta is 0 struct AgentSpmv { //set constants enum { BLOCK_THREADS = SpmvPolicyT::BLOCK_THREADS, ITEMS_PER_THREAD = SpmvPolicyT::ITEMS_PER_THREAD, TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, }; //we use the return type pair for scanning where the pairs are accumulated segment-value with segemn-index __device__ __forceinline__ KeyValuePair<IndexType_,ValueType_> consumeTile( Coord<IndexType_> tileStartCoord, //this gives the starting coordinate to be determined from the initial mergepath call Coord<IndexType_> tileEndCoord, CsrMvParams<IndexType_, ValueType_> &spParams, SemiRingType_ SR) //pass struct as a const reference { IndexType_ tileNumRows = tileEndCoord.x - tileStartCoord.x; //length(rowOffSets) = numRows + 1 in merge path ignore first element for 1 and so length of path in x-direction gives the exact number of rows IndexType_ tileNnz = tileEndCoord.y - tileStartCoord.y; //number of nonzero goes down path countingITerator is indexed by columnInd and Val array which are of size nnz //load row offsets into shared memory-create shared memory row offset pointer __shared__ IndexType_ smemTileRowPtr[ITEMS_PER_THREAD + TILE_ITEMS + 1]; //copy row offsets into shared memory for accumulating matrix vector dot products in the merge path for (int item = threadIdx.x; item <= tileNumRows; item += BLOCK_THREADS) //index by block_threads that is the number of threads per block //start with rowoffsets at the strat coordinate and corresponding threadId can modiy wd to do a cache wrapper for efficiency later { if ((tileStartCoord.x + item) < spParams.m) //memory protection since already at +1 only go up to m { smemTileRowPtr[item] = spParams.csrRowPtr[tileStartCoord.x + item]; } } //after loading into shared memory we must sync the threads to make sure all complete __syncthreads(); 
Coord<IndexType_> threadStartCoord; //call MergePath again on shared memory after using start indices IndexType_ diag = threadIdx.x * ITEMS_PER_THREAD; //compute diagonal //shared memory row pointer has been indexed down to 0 so count offset can start at 0 too //counter iterator starts at current y position IndexType_ countIndId = tileStartCoord.y; MergePathSearch(diag, smemTileRowPtr, //sort list A = row offsets in shared memort countIndId, //sort list B = natural number consecutive counting indices starting index tileNumRows, tileNnz, threadStartCoord); //resulting path is stored in threadStartCoord __syncthreads(); //make sure every thread has completed their diagonal of merge path //Compute the thread's merge path segment to perform the dot product foing down the merge path below in the loop Coord<IndexType_> threadCurrentCoord = threadStartCoord; KeyValuePair<IndexType_, ValueType_> scanSegment[ITEMS_PER_THREAD]; //static array of type key value pairs //initialize each dot product contribution to 0 ValueType_ totalValue; SR.setPlus_ident(totalValue);//initialize to semiring identity for plus operation #pragma unroll //unroll for loop for efficiency for (int item = 0; item < ITEMS_PER_THREAD; ++item) //loop over items belonging to thread along merge path { //go down merge path and sum. when move to right new component of result vector y //countInd is consecutive nonzero natural number array going down the matrix B so //indexed by y whereas rowOffset goes to the move and is A indexed by x countIndId = threadCurrentCoord.y + tileStartCoord.y; //line number problem IndexType_ nnzId = min(countIndId, spParams.nnz - 1); //make sure stay in bounds IndexType_ colIdx = spParams.csrColInd[nnzId]; ValueType_ A_val = spParams.csrVal[nnzId]; //A val //we assume A and x are of the same datatype //recall standard algorithm : y[row] += val[nz]*x[colInd[nnz]] in traditional sparse matrix vector form ValueType_ x_val = spParams.x[colIdx]; //csrColInd[nnzId] //wrapper of x vector could change dependent on the architecture //counter will tell direction to move either right or down since last entry of rowoffsets is the totla number of nonzeros //the counter array keeps track of this if (countIndId < smemTileRowPtr[threadCurrentCoord.x]) //this means less than the number of nonzeros in that row { //move down current row accumulating matrix and vector dot product totalValue = SR.plus(SR.times(A_val, x_val), totalValue); //add binary operation because may change to minus and min rather than + and * //store in key value pair scanSegment[item].key = tileNumRows; scanSegment[item].value = totalValue; ++threadCurrentCoord.y; } else //move right to new row and reset {//added in else if condition scanSegment[item].key = threadCurrentCoord.x; scanSegment[item].value = totalValue; //store current without adding new and set to 0 for new row SR.setPlus_ident(totalValue);//0.0;//SR.times_null; ++threadCurrentCoord.x; } } __syncthreads(); //now each thread block has their matrix vector multiplication and we must do a blockwide reduction //Block-wide reduce-value-by-segment KeyValuePair<IndexType_, ValueType_> scanItem, tileCarry; //this is the key value pair that we will be returning scanItem.key = threadCurrentCoord.x; //added min in other version had min with num rows scanItem.value = totalValue; PrefixSum<IndexType_, ValueType_, SemiRingType_, BLOCK_THREADS>(SR).ExclusiveKeyValueScan(scanItem, tileCarry); if (tileNumRows > 0) { if (threadIdx.x == 0) scanItem.key = -1; //can be negative imp to be int rather than 
unsigned int //do a direct scatter #pragma unroll for (int item = 0; item < ITEMS_PER_THREAD; ++item) { if (scanSegment[item].key < tileNumRows) //scanSegment is an array of key value pairs { if (scanItem.key == scanSegment[item].key) { scanSegment[item].value = SR.plus(scanItem.value, scanSegment[item].value); } if (hasAlpha){ //boolean set to 1 need to multiply Ax by alpha as stored in spParams scanSegment[item].value = SR.times(spParams.alpha, scanSegment[item].value); } //check if has beta then need to alter y the right hand side is multiplied by beta if (hasBeta) { //y = alpha*A*x + beta*y ValueType_ y_val = spParams.y[tileStartCoord.x + scanSegment[item].key]; //currentxcoord is stored in the key and this will give corresponding and desired row entry in y scanSegment[item].value = SR.plus(SR.times(spParams.beta, y_val), scanSegment[item].value); } //Set the output vector row element spParams.y[tileStartCoord.x + scanSegment[item].key] = scanSegment[item].value; //disjoint keys } } } //Return the til'es running carry-out key value pair return tileCarry; //will come from exclusive scan } //overload consumetile function for the one in the interafce which will be called by the dispatch function __device__ __forceinline__ void consumeTile ( Coord<IndexType_> *dTileCoords, //pointer to the temporary array of tile starting cooordinates IndexType_ *dTileCarryKeys, //output pointer to temporary array carry-out dot product row-ids, one per block ValueType_ *dTileCarryValues, //output pointer to temporary array carry-out dot product row-ids, one per block int numMergeTiles, //number of merge tiles CsrMvParams<IndexType_, ValueType_> spParams, SemiRingType_ SR) { int tid = (blockIdx.x * gridDim.y) + blockIdx.y; //curent tile index //only continue if tid is in proper range if (tid >= numMergeTiles) return; Coord<IndexType_> tileStartCoord = dTileCoords[tid]; //+0 ignored Coord<IndexType_> tileEndCoord = dTileCoords[tid + 1]; //Consume multi-segment tile by calling above consumeTile overloaded function KeyValuePair<IndexType_, ValueType_> tileCarry = consumeTile( tileStartCoord, tileEndCoord, spParams, SR); //output the tile's carry out if (threadIdx.x == 0) { if (hasAlpha) tileCarry.value = SR.times(spParams.alpha, tileCarry.value); tileCarry.key += tileStartCoord.x; if (tileCarry.key < spParams.m) { dTileCarryKeys[tid] = tileCarry.key; dTileCarryValues[tid] = tileCarry.value; } else { // Make sure to reject keys larger than the matrix size directly here. 
// printf("%d %lf\n",tileCarry.key , tileCarry.value); // this patch may be obsolete after the changes related to bug#1754610 dTileCarryKeys[tid] = -1; } } } }; //this device kernel will call the above agent function-ignoring policies for now template < typename SpmvBlockThread, //parameterized spmvpolicy tunign policy type typename IndexType_, //index type either 32 bit or 64 bit integer for rowoffsets of columnindices typename ValueType_, //matrix and vector value type typename SemiRingType_, //this follows different semiring structs to be passed depending on the enum bool hasAlpha, //determines where alpha = 1 as above bool hasBeta> //determines whether beta = 0 as above __global__ void DeviceSpmvKernel( //this will call consume tile CsrMvParams<IndexType_, ValueType_> spParams, //pass constant reference to spmv parameters const SemiRingType_ &SR, Coord<IndexType_> *dTileCoords, //input pointer to temporaray array of the tile starting coordinates of each (y,x) = (i,j) pair on the merge path IndexType_ *dTileCarryKeys, //output is a pointer to the temp array that carries out the dot porduct row-ids where it is one per block ValueType_ *dTileCarryValues, //output is a pointer to the temp array that carries out the dot porduct row-ids where it is one per block int numTiles //input which is the number of merge tiles ) { //call Spmv agent type specialization- need to fix this call!! //now call cosntructor to initialize and consumeTile to calculate the row dot products AgentSpmv<SpmvBlockThread, IndexType_, ValueType_, SemiRingType_, hasAlpha, hasBeta>().consumeTile( dTileCoords, dTileCarryKeys, dTileCarryValues, numTiles, spParams, SR); } //Helper functions for the reduction by kernel //for block loading block_load_vectorize for SM_30 implemenation from cub //Load linear segment into blocked arrangement across the thread block, guarded by range, //with a fall-back assignment of -1 for out of bound template<int ITEMS_PER_THREAD, typename IndexType_, typename ValueType_> __device__ __forceinline__ void loadDirectBlocked( int linearTid, //input:a asuitable 1d thread-identifier for calling the thread IndexType_ *blockItrKeys, //input: thread block's base input iterator for loading from ValueType_ *blockItrValues, //input: thread block's base input iterator for loading from KeyValuePair<IndexType_, ValueType_> (&items)[ITEMS_PER_THREAD], // output:data to load int validItems, //input:Number of valid items to load KeyValuePair<IndexType_, ValueType_> outOfBoundsDefault) //input:Default value to assign to out of bounds items -1 in this case { #pragma unroll for (int item = 0; item < ITEMS_PER_THREAD; ++item) { int offset = (linearTid * ITEMS_PER_THREAD) + item; // changed validItems to validItems-1 for bug#1754610 since it was causing uninitialized memory accesses here items[item].key = (offset < validItems-1) ? blockItrKeys[offset] : outOfBoundsDefault.key; items[item].value = (offset < validItems-1) ? 
blockItrValues[offset] : outOfBoundsDefault.value; } } //load linear segment of items into a blocked arangement across a thread block template<int ITEMS_PER_THREAD, typename IndexType_, typename ValueType_> __device__ __forceinline__ void loadDirectBlocked( int linearTid, IndexType_ * blockItrKeys, ValueType_ * blockItrValues, KeyValuePair<IndexType_,ValueType_> (&items)[ITEMS_PER_THREAD]) { //Load directly in thread-blocked order #pragma unroll for (int item = 0; item < ITEMS_PER_THREAD; ++item) { items[item].key = blockItrKeys[(linearTid *ITEMS_PER_THREAD) + item]; items[item].value = blockItrValues[(linearTid *ITEMS_PER_THREAD) + item]; } } //This part pertains to the fixup kernel which does a device-wide reduce-value-by-key //for the thread blocks template< typename SpmvPolicyT, // parameterized SpmvBlockThread tuning policy type as listed above typename IndexType_, typename ValueType_, typename SemiRingType_> //matrix and vector value type struct AgentSegmentReduction { //set constants enum { BLOCK_THREADS = SpmvPolicyT::BLOCK_THREADS, ITEMS_PER_THREAD = SpmvPolicyT::ITEMS_PER_THREAD, TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, }; //This function processes an input tile and uses an atomic rewrite strategy template<bool isLastTile> __device__ __forceinline__ void consumeTilePost( IndexType_ *dInKeys, //input array of key value pairs ValueType_ *dInValues, //input array of key value pairs ValueType_ *dAggregatesOut, //output value aggregates into final array y IndexType_ numRemaining, //Number of global input items remaining including this tile IndexType_ tileOffset, //Tile offset SemiRingType_ SR ) { KeyValuePair<IndexType_,ValueType_> pairs[ITEMS_PER_THREAD]; KeyValuePair<IndexType_, ValueType_> outOfBoundsPair; outOfBoundsPair.key = -1; //default value to assign to out of bounds items is set to be -1 int linearTid = threadIdx.x; //load the values into pairs if (isLastTile) { loadDirectBlocked<ITEMS_PER_THREAD, IndexType_, ValueType_> (linearTid, dInKeys + tileOffset, dInValues + tileOffset, pairs, numRemaining, outOfBoundsPair); } else { loadDirectBlocked<ITEMS_PER_THREAD, IndexType_, ValueType_> (linearTid, dInKeys + tileOffset, dInValues + tileOffset, pairs); } #pragma unroll for (int item = 1; item < ITEMS_PER_THREAD; ++item) { ValueType_ *dScatter = dAggregatesOut + pairs[item-1].key; //write to correct row using the key if (pairs[item].key != pairs[item-1].key) { SR.atomicPlus(dScatter, pairs[item -1].value); } else pairs[item].value = SR.plus(pairs[item -1].value, pairs[item].value); //the operation is SUm } // Write out last item if it is valid by checking last key boolean. // pairs[ITEMS_PER_THREAD - 1].key = -1 for out bound elements. 
ValueType_ *dScatter = dAggregatesOut + pairs[ITEMS_PER_THREAD - 1].key; if ((!isLastTile || pairs[ITEMS_PER_THREAD - 1].key >= 0)) { //printf("hello %d %lf\n", pairs[ITEMS_PER_THREAD - 1].key , pairs[ITEMS_PER_THREAD -1].value); SR.atomicPlus(dScatter, pairs[ITEMS_PER_THREAD -1].value); } } //this function will call consumeTilePost and it scans the tiles of items as a part of a dynamic chained scan __device__ __forceinline__ void consumeRange( IndexType_ *dKeysIn, //input array of key value pairs ValueType_ *dValuesIn, //input array of key value pairs ValueType_ *dAggregatesOut, //output value aggregates into final array y int numItems, //totall number of input items int numTiles, //total number of input tiles SemiRingType_ SR) { //Blocks are launched in increasing order, so we assign one tile per block int tileIdx = (blockIdx.x * gridDim.y) + blockIdx.y; //current tile index same as in consumeTile IndexType_ tileOffset = tileIdx * TILE_ITEMS; //Global offset for the current tile IndexType_ numRemaining = numItems - tileOffset; //Remaining items which includes this tile if (numRemaining > TILE_ITEMS) //this is not the last tile so call wit template argument set to be false consumeTilePost<false>(dKeysIn, dValuesIn, dAggregatesOut, numRemaining,tileOffset, SR); else if (numRemaining > 0) //this is the last tile which could be possibly partially full consumeTilePost<true>(dKeysIn, dValuesIn, dAggregatesOut, numRemaining,tileOffset, SR); } }; //Blockwide reduction by key final kernel template < typename SpmvBlockThreadSegment, //parameterized spmvpolicy tuning policy type typename IndexType_, typename ValueType_, typename SemiRingType_> __global__ void DeviceSegmentReductionByKeyKernel( //this will call consume tile IndexType_ *dKeysIn, //input pointer to the arry of dot product carried out by row-ids, one per spmv block ValueType_ *dValuesIn, //input pointer to the arry of dot product carried out by row-ids, one per spmv block ValueType_ *dAggregatesOut, //output value aggregates - will be y-final output of method IndexType_ numItems, // total number of items to select int numTiles, //total number of tiles for the entire problem SemiRingType_ SR) { //now call cosntructor to initialize and consumeTile to calculate the row dot products AgentSegmentReduction<SpmvBlockThreadSegment, IndexType_, ValueType_, SemiRingType_>().consumeRange( dKeysIn, dValuesIn, dAggregatesOut, numItems, numTiles, SR); } template<typename IndexType_, typename ValueType_, typename SemiRingType_, bool hasAlpha, bool hasBeta> //matrix and vector value type //this is setting all the grid parameters and size struct DispatchSpmv { //declare constants enum { INIT_KERNEL_THREADS = 128 }; //sample tuning polic- can add more later //SM30 struct Policy350 //as a sample there are many other policies to follow { typedef SpmvBlockThread< (sizeof(ValueType_) > 4) ? 96 : 128, //for double use 96 threads per block otherwise 128 (sizeof(ValueType_) > 4) ? 
4 : 4 //for double use 4 items per thread otherwise use 7 > SpmvPolicyT;///use instead of PtxPolicy come backa nd use cusparse to determine the architetcure }; struct Policy350Reduction //as a sample there are many other policies to follow { typedef SpmvBlockThread<128,3> SpmvPolicyT; //use instead of PtxPolicy come backa nd use cusparse to determine the architetcure };//for <128,1> 1 item per thread need a reduction by key __forceinline__ static cudaError_t Dispatch(CsrMvParams<IndexType_,ValueType_> spParams, const SemiRingType_ &SR, cudaStream_t stream = 0) { cudaError_t error = cudaSuccess; //could move this block to initkernel fucntion int blockThreads = Policy350::SpmvPolicyT::BLOCK_THREADS; int itemsPerThread = Policy350::SpmvPolicyT::ITEMS_PER_THREAD; int blockThreadsRed = Policy350Reduction::SpmvPolicyT::BLOCK_THREADS; int itemsPerThreadRed = Policy350Reduction::SpmvPolicyT::ITEMS_PER_THREAD; //calculate total number of spmv work items do { //do-while loop condition at end of loop //Get device ordinal int deviceOrdinal, smVersion, smCount, maxDimx; if (error = cudaGetDevice(&deviceOrdinal)) break; //Get device SM version if (error = SmVersion(smVersion, deviceOrdinal)) break; //Get SM count-cudaDeviceGetAttribute is built in cuda function if (error = cudaDeviceGetAttribute(&smCount, cudaDevAttrMultiProcessorCount, deviceOrdinal)) break; //Get max dimension of the grid in the x direction if (error = cudaDeviceGetAttribute(&maxDimx, cudaDevAttrMaxGridDimX, deviceOrdinal)) break; int numMergeItems = spParams.m + spParams.nnz; //total amount of work for one diagonal/thread //Tile sizes of relevant kernels int mergeTileSize = blockThreads * itemsPerThread; //for floats this will be a larger number //and since we will be dividing by it less memory allocated for the float case int segmentRedTileSize = blockThreadsRed * itemsPerThreadRed; //Calculate number of tiles for the kernels //need unsigned int to prevent underflow/overflow unsigned int numMergeTiles = (numMergeItems + mergeTileSize - 1) / mergeTileSize; //launch thread number unsigned int numSegmentRedTiles = (numMergeTiles + segmentRedTileSize - 1) / segmentRedTileSize; //int spmv_sm_occupancy ignore maxSmOccupancy function for now and corresponding segmentfixup //get grid dimensions use cuda built in dattetype dim3-has constructor with the 3 arguments dim3 spmvGridSize(min(numMergeTiles, (unsigned int) maxDimx), (numMergeTiles + maxDimx - 1) / maxDimx, //make sure at least 1 1); //2D grid //grid for second kernel dim3 segmentRedGridSize(min(numSegmentRedTiles, (unsigned int) maxDimx), (numSegmentRedTiles + maxDimx -1) / maxDimx, 1); Vector<Coord<IndexType_> > dTileCoords(numMergeTiles + 1, stream); Vector<IndexType_> dTileCarryKeys(numMergeTiles, stream); Vector<ValueType_> dTileCarryValues(numMergeTiles, stream); //Get search grid dimensions int searchBlockSize = INIT_KERNEL_THREADS; int searchGridSize = (numMergeTiles + searchBlockSize) / searchBlockSize; //ignored the +1 -1 //call Search Kernel within the host so need <<>>> //call devicesearch kernel to compute starting coordiantes of merge path DeviceSpmvSearchKernel<typename Policy350::SpmvPolicyT, IndexType_, ValueType_> <<<searchGridSize, searchBlockSize, 0, stream >>>( numMergeTiles, dTileCoords.raw(), spParams); cudaCheckError(); //this will give the starting coordaintes to be called in DeviceSPmvKernel DeviceSpmvKernel<typename Policy350::SpmvPolicyT, IndexType_,ValueType_, SemiRingType_, hasAlpha, hasBeta> <<<spmvGridSize, blockThreads, 0, stream>>>( spParams, SR, 
dTileCoords.raw(), dTileCarryKeys.raw(), dTileCarryValues.raw(), numMergeTiles); cudaCheckError(); //Run reduce by key kernel if necessary //if (error = cudaPeekAtLastError()) break; //check for failure to launch if (numMergeTiles > 1) { DeviceSegmentReductionByKeyKernel<typename Policy350Reduction::SpmvPolicyT, IndexType_, ValueType_, SemiRingType_> <<<segmentRedGridSize, blockThreadsRed, 0>>> (dTileCarryKeys.raw(), dTileCarryValues.raw(), spParams.y, numMergeTiles, numSegmentRedTiles, SR); cudaCheckError(); //if (error = cudaPeekAtLastError()) break; //check for failure to launch of fixup kernel } } while(0); //make sure executes exactly once to give chance to break earlier with errors cudaCheckError(); return error; } }; template<typename IndexType_, typename ValueType_, typename SemiRingType_> cudaError_t callDispatchSpmv(CsrMvParams<IndexType_, ValueType_> &spParams, const SemiRingType_ &SR, cudaStream_t stream = 0) { cudaError_t error; //determine semiring type if (spParams.beta == SR.times_null) { if (spParams.alpha == SR.times_ident) //simply y = A*x error = DispatchSpmv<IndexType_, ValueType_, SemiRingType_, false, false>::Dispatch(spParams, SR, stream); //must be on the device else error = DispatchSpmv<IndexType_, ValueType_,SemiRingType_, true, false>::Dispatch(spParams, SR, stream); //must be passed by reference to some since writing } else { if (spParams.alpha == SR.times_ident) error = DispatchSpmv<IndexType_, ValueType_, SemiRingType_, false, true>::Dispatch(spParams, SR, stream); else error = DispatchSpmv<IndexType_, ValueType_, SemiRingType_, true, true>::Dispatch(spParams, SR, stream); } return error; } template<typename IndexType_, typename ValueType_> cudaError_t callSemiringSpmv(CsrMvParams<IndexType_, ValueType_> &spParams, Semiring SR, cudaStream_t stream = 0) { // This is dangerous but we need to initialize this value, probably it's // better to return success than to return some misleading error code cudaError_t error = cudaSuccess; switch(SR) { case PlusTimes: { PlusTimesSemiring<ValueType_> plustimes; //can be float or double for real case error = callDispatchSpmv(spParams, plustimes, stream); } break; case MinPlus: { MinPlusSemiring<ValueType_> minplus; error = callDispatchSpmv(spParams, minplus, stream); } break; case MaxMin: { MaxMinSemiring<ValueType_> maxmin; error = callDispatchSpmv(spParams, maxmin, stream); } break; case OrAndBool: { OrAndBoolSemiring<ValueType_> orandbool; error = callDispatchSpmv(spParams, orandbool, stream); } break; case LogPlus: { LogPlusSemiring<ValueType_> logplus; error = callDispatchSpmv(spParams, logplus, stream); } break; } return error; } //create a device function interface to call the above dispatch function template <typename IndexType_, typename ValueType_> cudaError_t csrmv_mp( IndexType_ n, IndexType_ m, IndexType_ nnz, ValueType_ alpha, ValueType_ * dValues, //all must be preallocated on the device IndexType_ * dRowOffsets, IndexType_ * dColIndices, ValueType_ *dVectorX, ValueType_ beta, ValueType_ *dVectorY, Semiring SR, cudaStream_t stream) { //create user interface //calling device kernel depends on tempalte boolean parameters fro alpha/beta //Set parameters for struct CsrMvParams<IndexType_, ValueType_> spParams; spParams.m = m; spParams.n = n; spParams.nnz = nnz; spParams.alpha = alpha; spParams.beta = beta; spParams.csrRowPtr = dRowOffsets + 1; //ignore first 0 component in merge path specific for this spmv only spParams.csrVal = dValues; spParams.csrColInd = dColIndices; spParams.x = dVectorX; spParams.y = 
dVectorY; return callSemiringSpmv(spParams, SR, stream); } template<typename IndexType_, typename ValueType_> cudaError_t csrmv_mp( IndexType_ n, IndexType_ m, IndexType_ nnz, ValueType_ alpha, ValuedCsrGraph <IndexType_, ValueType_> network, ValueType_ *dVectorX, ValueType_ beta, ValueType_ *dVectorY, Semiring SR, cudaStream_t stream ) { //calling device kernel depends on tempalte boolean parameters fro alpha/beta //Set parameters for struct CsrMvParams<IndexType_, ValueType_> spParams; spParams.m = m; spParams.n = n; spParams.nnz = nnz; spParams.alpha = alpha; spParams.beta = beta; spParams.csrRowPtr = network.get_raw_row_offsets() + 1; //ignore first 0 component in merge path specific for this spmv only spParams.csrVal = network.get_raw_values(); spParams.csrColInd = network.get_raw_column_indices(); spParams.x = dVectorX; spParams.y = dVectorY; return callSemiringSpmv(spParams, SR, stream); } //declare template types to be called template cudaError_t csrmv_mp<int, double>( int n, int m, int nnz, double alpha, double * dValues, //all must be preallocated on the device int * dRowOffsets, int * dColIndices, double *dVectorX, double beta, double *dVectorY, Semiring SR, cudaStream_t stream ); template cudaError_t csrmv_mp<long long, double>( long long n, long long m, long long nnz, double alpha, double * dValues, //all must be preallocated on the device long long * dRowOffsets, long long * dColIndices, double *dVectorX, double beta, double *dVectorY, Semiring SR, cudaStream_t stream ); template cudaError_t csrmv_mp<int, float>( int n, int m, int nnz, float alpha, float * dValues, //all must be preallocated on the device int * dRowOffsets, int * dColIndices, float *dVectorX, float beta, float *dVectorY, Semiring SR, cudaStream_t stream ); //for 64 bit support which may not be needed template cudaError_t csrmv_mp<long long, float>( long long n, long long m, long long nnz, float alpha, float * dValues, //all must be preallocated on the device long long * dRowOffsets, long long * dColIndices, float *dVectorX, float beta, float *dVectorY, Semiring SR, cudaStream_t stream ); //assume embedding booleans in the reals /*template cudaError_t csrmv_mp<int, bool>( int n, int m, int nnz, bool alpha, bool * dValues, //all must be preallocated on the device int * dRowOffsets, int * dColIndices, bool *dVectorX, bool beta, bool *dVectorY, Semiring SR ); //for 64 bit support which may not be needed template cudaError_t csrmv_mp<long long, bool>( long long n, long long m, long long nnz, bool alpha, bool * dValues, //all must be preallocated on the device long long * dRowOffsets, long long * dColIndices, bool *dVectorX, bool beta, bool *dVectorY, Semiring SR );*/ //declare template types to be called using valued_csr_graph version template cudaError_t csrmv_mp<int, double>( int n, int m, int nnz, double alpha, ValuedCsrGraph <int, double> network, double *dVectorX, double beta, double *dVectorY, Semiring SR, cudaStream_t stream ); template cudaError_t csrmv_mp<long long, double>( long long n, long long m, long long nnz, double alpha, ValuedCsrGraph <long long, double> network, double *dVectorX, double beta, double *dVectorY, Semiring SR, cudaStream_t stream ); template cudaError_t csrmv_mp<int, float>( int n, int m, int nnz, float alpha, ValuedCsrGraph <int, float> network, float *dVectorX, float beta, float *dVectorY, Semiring SR, cudaStream_t stream ); //for 64 bit support which may not be needed template cudaError_t csrmv_mp<long long, float>( long long n, long long m, long long nnz, float alpha, 
ValuedCsrGraph <long long, float> network, float *dVectorX, float beta, float *dVectorY, Semiring SR, cudaStream_t stream ); /*template cudaError_t csrmv_mp<int, bool>( int n, int m, int nnz, bool alpha, ValuedCsrGraph <int, bool> network, bool *dVectorX, bool beta, bool *dVectorY, Semiring SR ); //for 64 bit support which may not be needed template cudaError_t csrmv_mp<long long, bool>( long long n, long long m, long long nnz, bool alpha, ValuedCsrGraph <long long, bool> network, bool *dVectorX, bool beta, bool *dVectorY, Semiring SR );*/ } //end namespace nvgraph using namespace nvgraph; //this is the standard kernel used to test the semiring operations template<typename IndexType_, typename ValueType_, typename SemiRingType_> __global__ void csrmv(IndexType_ num_rows, IndexType_ *dRowOffsets, IndexType_ *dColIndices, ValueType_ *dValues, ValueType_ *dVectorX, ValueType_ *dVectorY, SemiRingType_ SR, ValueType_ alpha, ValueType_ beta) { int row = blockDim.x * blockIdx.x + threadIdx.x ; if (row < num_rows) { ValueType_ dot; SR.setPlus_ident(dot); //SR.setPlus_ident(dVectorY[row]); //need to initialize y outside IndexType_ row_start = dRowOffsets[row]; IndexType_ row_end = dRowOffsets[row + 1]; for (int i = row_start; i < row_end; i++) { dot = SR.plus(SR.times(alpha,SR.times(dValues[i], dVectorX[dColIndices[i]])), dot); } dVectorY[row] = SR.plus(dot, (SR.times(beta, dVectorY[row]))); } } template<typename IndexType_, typename ValueType_> void callTestCsrmv(IndexType_ num_rows, IndexType_ *dRowOffsets, IndexType_ *dColIndices, ValueType_ *dValues, ValueType_ *dVectorX, ValueType_ *dVectorY, nvgraph::Semiring SR, ValueType_ alpha, ValueType_ beta) { const int side = 2048; const int numThreads = 256; const int numBlocks = (side * side + numThreads - 1) / numThreads; switch(SR) { case nvgraph::PlusTimes: { nvgraph::PlusTimesSemiring<ValueType_> plustimes; //can be float or double for real case csrmv<<<numBlocks, numThreads>>>(num_rows, dRowOffsets, dColIndices, dValues, dVectorX, dVectorY, plustimes, alpha, beta); } break; case nvgraph::MinPlus: { nvgraph::MinPlusSemiring<ValueType_> minplus; csrmv<<<numBlocks, numThreads>>>(num_rows, dRowOffsets, dColIndices, dValues, dVectorX, dVectorY, minplus, alpha, beta); } break; case nvgraph::MaxMin: { nvgraph::MaxMinSemiring<ValueType_> maxmin; csrmv<<<numBlocks, numThreads>>>(num_rows, dRowOffsets, dColIndices, dValues, dVectorX, dVectorY, maxmin, alpha, beta); } break; case nvgraph::OrAndBool: { nvgraph::OrAndBoolSemiring<ValueType_> orandbool; csrmv<<<numBlocks, numThreads>>>(num_rows, dRowOffsets, dColIndices, dValues, dVectorX, dVectorY, orandbool, alpha, beta); } break; case nvgraph::LogPlus: { nvgraph::LogPlusSemiring<ValueType_> logplus; csrmv<<<numBlocks, numThreads>>>(num_rows, dRowOffsets, dColIndices, dValues, dVectorX, dVectorY, logplus, alpha, beta); } break; } cudaCheckError(); } template void callTestCsrmv<int, float>(int num_rows, int *dRowOffsets, int*dColIndices, float *dValues, float *dVectorX, float *dVectorY, nvgraph::Semiring SR, float alpha, float beta); template void callTestCsrmv<int, double>(int num_rows, int *dRowOffsets, int*dColIndices, double *dValues, double *dVectorX, double *dVectorY, nvgraph::Semiring SR, double alpha, double beta);
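// A minimal host-side sketch of how the csrmv_mp entry point above is called for
// y = alpha*A*x + beta*y over the plus-times semiring. The 3x3 matrix, the vectors
// and the CHECK macro are illustrative assumptions, not nvgraph code, and linking
// against the library that provides csrmv_mp is assumed. Note that dRowOffsets is
// the ordinary CSR offsets array of length m+1 starting at 0; csrmv_mp itself
// skips the leading 0 by storing dRowOffsets + 1.
#include "nvgraph_csrmv.hxx"
#include <cuda_runtime.h>
#include <cstdio>

#define CHECK(call) do { cudaError_t e_ = (call); if (e_ != cudaSuccess) { \
    printf("error: %s\n", cudaGetErrorString(e_)); return 1; } } while (0)

int main()
{
    // A = [[1 0 2],[0 3 0],[4 0 5]], x = [1 1 1], so A*x = [3 3 9]
    int    hRow[4] = {0, 2, 3, 5};
    int    hCol[5] = {0, 2, 1, 0, 2};
    double hVal[5] = {1, 2, 3, 4, 5};
    double hX[3] = {1, 1, 1}, hY[3] = {0, 0, 0};

    int *dRow, *dCol;
    double *dVal, *dX, *dY;
    CHECK(cudaMalloc(&dRow, sizeof(hRow)));
    CHECK(cudaMalloc(&dCol, sizeof(hCol)));
    CHECK(cudaMalloc(&dVal, sizeof(hVal)));
    CHECK(cudaMalloc(&dX, sizeof(hX)));
    CHECK(cudaMalloc(&dY, sizeof(hY)));
    CHECK(cudaMemcpy(dRow, hRow, sizeof(hRow), cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(dCol, hCol, sizeof(hCol), cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(dVal, hVal, sizeof(hVal), cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(dX, hX, sizeof(hX), cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(dY, hY, sizeof(hY), cudaMemcpyHostToDevice));

    // alpha = 1, beta = 0: with the plus-times semiring this should select the
    // hasAlpha = false, hasBeta = false dispatch path
    CHECK(nvgraph::csrmv_mp<int, double>(3, 3, 5, 1.0, dVal, dRow, dCol,
                                         dX, 0.0, dY, nvgraph::PlusTimes, 0));
    CHECK(cudaMemcpy(hY, dY, sizeof(hY), cudaMemcpyDeviceToHost));
    printf("y = %g %g %g\n", hY[0], hY[1], hY[2]); // expected: 3 3 9
    return 0;
}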
c4212861b77f42997f2815bfa59c8855ae58e54b.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "matrixMultiplicationKernel.hip"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    if (argc < 2) return 1; // expects the number of matrix sizes to benchmark as argv[1]
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // hipMalloc takes a size in bytes, so scale the element count by sizeof(float)
            float *A = NULL;
            hipMalloc(&A, XSIZE * YSIZE * sizeof(float));
            float *B = NULL;
            hipMalloc(&B, XSIZE * YSIZE * sizeof(float));
            float *C = NULL;
            hipMalloc(&C, XSIZE * YSIZE * sizeof(float));
            int N = XSIZE * YSIZE;
            // round the launch domain up to a multiple of the block size
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0); // establishes the device context
            hipLaunchKernelGGL((matrixMultiplicationKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, A, B, C, N);
            hipDeviceSynchronize();
            // warm-up launches
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL((matrixMultiplicationKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, A, B, C, N);
            }
            hipDeviceSynchronize(); // drain the warm-up launches before timing starts
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL((matrixMultiplicationKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, A, B, C, N);
            }
            hipDeviceSynchronize(); // launches are asynchronous; wait so the timing covers execution, not just launch overhead
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            hipFree(A); hipFree(B); hipFree(C); // release the buffers before the next configuration
        }
    }
}
c4212861b77f42997f2815bfa59c8855ae58e54b.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "matrixMultiplicationKernel.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *A = NULL; cudaMalloc(&A, XSIZE*YSIZE);
            float *B = NULL; cudaMalloc(&B, XSIZE*YSIZE);
            float *C = NULL; cudaMalloc(&C, XSIZE*YSIZE);
            int N = XSIZE*YSIZE;
            // pad the problem size up to a multiple of the block shape
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0); // no-op free that forces runtime/context initialization
            matrixMultiplicationKernel<<<gridBlock, threadBlock>>>(A, B, C, N);
            cudaDeviceSynchronize();
            // warm-up launches
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                matrixMultiplicationKernel<<<gridBlock, threadBlock>>>(A, B, C, N);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                matrixMultiplicationKernel<<<gridBlock, threadBlock>>>(A, B, C, N);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
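// Note on the timing above: kernel launches are asynchronous, so the
// steady_clock::now() after the 1000-launch loop fires before the kernels
// finish; without a cudaDeviceSynchronize() the loop mostly measures launch
// overhead. (The cudaMalloc sizes are also raw byte counts; XSIZE*YSIZE float
// elements would need a *sizeof(float).) A sketch of the same measurement
// with CUDA events, which time completion on the device itself:
/*
cudaEvent_t evStart, evStop;
cudaEventCreate(&evStart);
cudaEventCreate(&evStop);
cudaEventRecord(evStart, 0);
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    matrixMultiplicationKernel<<<gridBlock, threadBlock>>>(A, B, C, N);
}
cudaEventRecord(evStop, 0);
cudaEventSynchronize(evStop);                  // wait until all launches drain
float msecs = 0.f;
cudaEventElapsedTime(&msecs, evStart, evStop); // elapsed device time in ms
cudaEventDestroy(evStart);
cudaEventDestroy(evStop);
*/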
a960ccff25bca93b188ac3f4bb3a3f903eca6451.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernel_scanNaiveSumVertical.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            unsigned int *_d_out_integralImage = NULL; hipMalloc(&_d_out_integralImage, XSIZE*YSIZE);
            unsigned char *_d_in_image = NULL; hipMalloc(&_d_in_image, XSIZE*YSIZE);
            int _h_width = XSIZE;
            int _h_height = YSIZE;
            // pad the problem size up to a multiple of the block shape
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0); // no-op free that forces runtime/context initialization
            hipLaunchKernelGGL((kernel_scanNaiveSumVertical), dim3(gridBlock), dim3(threadBlock), 0, 0, _d_out_integralImage, _d_in_image, _h_width, _h_height);
            hipDeviceSynchronize();
            // warm-up launches
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL((kernel_scanNaiveSumVertical), dim3(gridBlock), dim3(threadBlock), 0, 0, _d_out_integralImage, _d_in_image, _h_width, _h_height);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL((kernel_scanNaiveSumVertical), dim3(gridBlock), dim3(threadBlock), 0, 0, _d_out_integralImage, _d_in_image, _h_width, _h_height);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
a960ccff25bca93b188ac3f4bb3a3f903eca6451.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernel_scanNaiveSumVertical.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            unsigned int *_d_out_integralImage = NULL; cudaMalloc(&_d_out_integralImage, XSIZE*YSIZE);
            unsigned char *_d_in_image = NULL; cudaMalloc(&_d_in_image, XSIZE*YSIZE);
            int _h_width = XSIZE;
            int _h_height = YSIZE;
            // pad the problem size up to a multiple of the block shape
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0); // no-op free that forces runtime/context initialization
            kernel_scanNaiveSumVertical<<<gridBlock, threadBlock>>>(_d_out_integralImage, _d_in_image, _h_width, _h_height);
            cudaDeviceSynchronize();
            // warm-up launches
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                kernel_scanNaiveSumVertical<<<gridBlock, threadBlock>>>(_d_out_integralImage, _d_in_image, _h_width, _h_height);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                kernel_scanNaiveSumVertical<<<gridBlock, threadBlock>>>(_d_out_integralImage, _d_in_image, _h_width, _h_height);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
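// The two while-loops above round iXSIZE/iYSIZE up to the next multiple of
// the block shape before dividing. The usual one-line equivalent is ceiling
// division (same grid, no loop):
/*
dim3 gridBlockAlt((XSIZE + BLOCKX - 1) / BLOCKX,
                  (YSIZE + BLOCKY - 1) / BLOCKY);
*/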
b08c84e8824c004daaeac6cb6f8ce49a055d6d70.hip
// !!! This is a file automatically generated by hipify!!!
// This is here so Netbeans doesn't error-spam my IDE
#if !defined(__HIPCC__)
// define the keywords, so that the IDE does not complain about them
#define __global__
#define __device__
#define __shared__
#define __constant__
#define blockIdx.x 1
#define blockDim.x 1
#define threadIdx.x 1
#endif

#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>

#include "GRT_Common/GRTCommon.h"
#include "CUDA_Common/CUDA_SAFE_CALL.h"

// These will be the same for all GPUs working on a hash.
__device__ __constant__ unsigned char NTLM_Regenerate_Device_Charset_Constant[512]; // Constant space for charset
__device__ __constant__ uint32_t NTLM_Regenerate_Device_Charset_Length; // Character set length
__device__ __constant__ uint32_t NTLM_Regenerate_Device_Chain_Length; // May as well pull it from constant memory... faster when cached.
__device__ __constant__ uint32_t NTLM_Regenerate_Device_Table_Index;
__device__ __constant__ uint32_t NTLM_Regenerate_Device_Number_Of_Threads; // It needs this, and can't easily calculate it
__device__ __constant__ uint32_t NTLM_Regenerate_Device_Number_Of_Chains_To_Regen;
__device__ __constant__ uint32_t NTLM_Regenerate_Device_Number_Of_Hashes;
__device__ __constant__ unsigned char NTLM_Regenerate_constantBitmap[8192]; // for lookups

#include "../../inc/CUDA_Common/CUDA_MD4.h"
#include "../../inc/CUDA_Common/Hash_Common.h"
#include "../../inc/GRT_CUDA_device/CUDA_Reduction_Functions.h"
#include "../../inc/GRT_CUDA_device/CUDA_Load_Store_Registers.h"

// Copy the shared variables to the host
extern "C" void copyNTLMRegenerateDataToConstant(char *hostCharset, uint32_t hostCharsetLength,
        uint32_t hostChainLength, uint32_t hostTableIndex, uint32_t hostNumberOfThreads,
        unsigned char *hostBitmap, uint32_t hostNumberOfHashes) {
    CH_CUDA_SAFE_CALL(hipMemcpyToSymbol(NTLM_Regenerate_Device_Charset_Constant, hostCharset, 512));
    CH_CUDA_SAFE_CALL(hipMemcpyToSymbol(NTLM_Regenerate_Device_Charset_Length, &hostCharsetLength, sizeof(uint32_t)));
    CH_CUDA_SAFE_CALL(hipMemcpyToSymbol(NTLM_Regenerate_Device_Chain_Length, &hostChainLength, sizeof(uint32_t)));
    CH_CUDA_SAFE_CALL(hipMemcpyToSymbol(NTLM_Regenerate_Device_Table_Index, &hostTableIndex, sizeof(uint32_t)));
    CH_CUDA_SAFE_CALL(hipMemcpyToSymbol(NTLM_Regenerate_Device_Number_Of_Threads, &hostNumberOfThreads, sizeof(uint32_t)));
    CH_CUDA_SAFE_CALL(hipMemcpyToSymbol(NTLM_Regenerate_constantBitmap, hostBitmap, 8192));
    CH_CUDA_SAFE_CALL(hipMemcpyToSymbol(NTLM_Regenerate_Device_Number_Of_Hashes, &hostNumberOfHashes, sizeof(uint32_t)));
}

extern "C" void setNTLMRegenerateNumberOfChains(uint32_t numberOfChains) {
    CH_CUDA_SAFE_CALL(hipMemcpyToSymbol(NTLM_Regenerate_Device_Number_Of_Chains_To_Regen, &numberOfChains, sizeof(uint32_t)));
}

__device__ inline void copyBitmap(unsigned char *sharedBitmap) {
    uint64_t *sharedBitmapCoalesce = (uint64_t *)sharedBitmap;
    uint64_t *deviceBitmapCoalesce = (uint64_t *)NTLM_Regenerate_constantBitmap;
    int a;
    if (threadIdx.x == 0) {
        for (a = 0; a < (8192 / 8); a++) {
            sharedBitmapCoalesce[a] = deviceBitmapCoalesce[a];
        }
    }
    // Make sure everyone is here and done before we return.
    __syncthreads();
}

/*
__global__ void RegenNTLMChainLen7(unsigned char *InitialPasswordArray, unsigned char *FoundPasswordArray,
        unsigned char *DeviceHashArray, uint32_t PasswordSpaceOffset, uint32_t StartChainIndex,
        uint32_t StepsToRun, uint32_t charset_offset, unsigned char *successArray) {

    // Needed variables for generation
    uint32_t CurrentStep, PassCount, password_index;
    const int password_length = 7;

    // Hash variables
    uint32_t b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15;
    uint32_t a, b, c, d;

    // Word-width access to the arrays
    uint32_t *InitialArray32;
    uint32_t *FoundPassword32;
    uint32_t *DEVICE_Hashes_32;
    uint32_t search_index, search_high, search_low, hash_order_a, hash_order_mem, temp;

    // 32-bit accesses to the hash arrays
    InitialArray32 = (uint32_t *) InitialPasswordArray;
    FoundPassword32 = (uint32_t *) FoundPasswordArray;
    DEVICE_Hashes_32 = (uint32_t *) DeviceHashArray;

    __shared__ char charset[512];
    __shared__ __align__(16) unsigned char sharedBitmap[8192];

    // Generic "copy charset to shared memory" function
    copySingleCharsetToShared(charset, NTLM_Regenerate_Device_Charset_Constant);
    copyBitmap(sharedBitmap);

    // Figure out which password we are working on.
    password_index = ((blockIdx.x * blockDim.x + threadIdx.x) + (PasswordSpaceOffset * NTLM_Regenerate_Device_Number_Of_Threads));

    // Return if this thread is working on something beyond the end of the password space
    if (password_index >= NTLM_Regenerate_Device_Number_Of_Chains_To_Regen) {
        return;
    }

    clearB0toB15(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15);

    // Load b0/b1 out of memory
    b0 = (uint32_t) InitialArray32[0 * NTLM_Regenerate_Device_Number_Of_Chains_To_Regen + password_index];
    b1 = (uint32_t) InitialArray32[1 * NTLM_Regenerate_Device_Number_Of_Chains_To_Regen + password_index];

    // Set up the padding/size.
    for (PassCount = 0; PassCount < StepsToRun; PassCount++) {
        CurrentStep = PassCount + StartChainIndex;
        padMDHash(password_length * 2, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15);
        CUDA_MD4(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15, a, b, c, d);
        if ((sharedBitmap[(a & 0x0000ffff) >> 3] >> (a & 0x00000007)) & 0x00000001) {
            {
                //printf("Bitmap hit.\n");
                // Init binary search through global password space
                //printf("NTLM_Regenerate_Device_Number_Of_hashes: %d\n", NTLM_Regenerate_Device_Number_Of_Hashes);
                search_high = NTLM_Regenerate_Device_Number_Of_Hashes;
                search_low = 0;
                search_index = 0;
                while (search_low < search_high) {
                    //printf("Search_low: %d\n", search_low);
                    //printf("Search_high: %d\n", search_high);
                    // Midpoint between search_high and search_low
                    search_index = search_low + (search_high - search_low) / 2;
                    //printf("Search_index: %d\n", search_index);
                    // reorder from low endian to big endian for searching, as hashes are sorted by byte.
                    temp = DEVICE_Hashes_32[4 * search_index];
                    hash_order_mem = (temp & 0xff) << 24 | ((temp >> 8) & 0xff) << 16 | ((temp >> 16) & 0xff) << 8 | ((temp >> 24) & 0xff);
                    hash_order_a = (a & 0xff) << 24 | ((a >> 8) & 0xff) << 16 | ((a >> 16) & 0xff) << 8 | ((a >> 24) & 0xff);
                    // Adjust search_high & search_low to work through space
                    if (hash_order_mem < hash_order_a) {
                        search_low = search_index + 1;
                    } else {
                        search_high = search_index;
                    }
                    if ((hash_order_a == hash_order_mem) && (search_low < NTLM_Regenerate_Device_Number_Of_Hashes)) {
                        // Break out of the search loop - search_index is on a value
                        break;
                    }
                }
                // Yes - it's a goto. And it speeds up performance significantly (15%).
                // It stays. These values are already loaded. If they are not the same,
                // there is NO point to touching global memory again.
                if (hash_order_a != hash_order_mem) {
                    goto next;
                }
                // We've broken out of the loop, search_index should be on a matching value
                // Loop while the search index is the same - linear search through this to find all possible
                // matching passwords.
                // We first need to move backwards to the beginning, as we may be in the middle of a set of matching hashes.
                // If we are index 0, do NOT subtract, as we will wrap and this goes poorly.
                while (search_index && (a == DEVICE_Hashes_32[(search_index - 1) * 4])) {
                    search_index--;
                }
                //printf("Got search index of %d\n", search_index);
                while ((a == DEVICE_Hashes_32[search_index * 4])) {
                    {
                        if (b == DEVICE_Hashes_32[search_index * 4 + 1]) {
                            if (c == DEVICE_Hashes_32[search_index * 4 + 2]) {
                                if (d == DEVICE_Hashes_32[search_index * 4 + 3]) {
                                    if (password_length >= 1) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 0] = (b0 >> 0) & 0xff;
                                    if (password_length >= 2) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 1] = (b0 >> 8) & 0xff;
                                    if (password_length >= 3) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 2] = (b0 >> 16) & 0xff;
                                    if (password_length >= 4) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 3] = (b0 >> 24) & 0xff;
                                    if (password_length >= 5) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 4] = (b1 >> 0) & 0xff;
                                    if (password_length >= 6) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 5] = (b1 >> 8) & 0xff;
                                    if (password_length >= 7) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 6] = (b1 >> 16) & 0xff;
                                    if (password_length >= 8) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 7] = deviceCharset[p7 + (MAX_CHARSET_LENGTH * 7)];
                                    if (password_length >= 9) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 8] = deviceCharset[p8 + (MAX_CHARSET_LENGTH * 8)];
                                    if (password_length >= 10) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 9] = deviceCharset[p9 + (MAX_CHARSET_LENGTH * 9)];
                                    if (password_length >= 11) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 10] = deviceCharset[p10 + (MAX_CHARSET_LENGTH * 10)];
                                    if (password_length >= 12) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 11] = deviceCharset[p11 + (MAX_CHARSET_LENGTH * 11)];
                                    if (password_length >= 13) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 12] = deviceCharset[p12 + (MAX_CHARSET_LENGTH * 12)];
                                    if (password_length >= 14) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 13] = deviceCharset[p13 + (MAX_CHARSET_LENGTH * 13)];
                                    if (password_length >= 15) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 14] = deviceCharset[p14 + (MAX_CHARSET_LENGTH * 14)];
                                    if (password_length >= 16) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 15] = deviceCharset[p15 + (MAX_CHARSET_LENGTH * 15)];
                                    successArray[search_index] = (unsigned char) 1;
                                    printf("FOUND PASSWORD:");
                                    for (int i = 0; i < password_length; i++) {
                                        printf("%c", FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + i]);
                                    }
                                    printf("\n");
                                }
                            }
                        }
                    }
                    search_index++;
                }
            }
        }
        // This is where the goto goes. Notice the skipping of all the global memory access.
next:
        reduceSingleCharsetNTLM(b0, b1, b2, b3, b4, a, b, c, d, CurrentStep, charset, charset_offset, password_length, NTLM_Regenerate_Device_Table_Index);
        charset_offset++;
        if (charset_offset >= NTLM_Regenerate_Device_Charset_Length) {
            charset_offset = 0;
        }
    }
    // Done with the number of steps we need to run
    // If we are done (or have somehow overflowed), store the result
    if (CurrentStep >= (NTLM_Regenerate_Device_Chain_Length - 1)) {
        // Do nothing.
    }
    // Else, store the b0/b1 values back to the initial array for the next loop
    else {
        InitialArray32[0 * NTLM_Regenerate_Device_Number_Of_Chains_To_Regen + password_index] = b0;
        InitialArray32[1 * NTLM_Regenerate_Device_Number_Of_Chains_To_Regen + password_index] = b1;
    }
}
*/

#define CREATE_NTLM_REGEN_KERNEL(length) \
__global__ void RegenNTLMChainLen##length(unsigned char *InitialPasswordArray, unsigned char *FoundPasswordArray, \
        unsigned char *DeviceHashArray, uint32_t PasswordSpaceOffset, uint32_t StartChainIndex, \
        uint32_t StepsToRun, uint32_t charset_offset, unsigned char *successArray) { \
    uint32_t CurrentStep, PassCount, password_index; \
    const int pass_length = length; \
    uint32_t b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15; \
    uint32_t a, b, c, d; \
    uint32_t *InitialArray32; \
    uint32_t *DEVICE_Hashes_32; \
    uint32_t search_index, search_high, search_low, hash_order_a, hash_order_mem, temp; \
    InitialArray32 = (uint32_t *) InitialPasswordArray; \
    DEVICE_Hashes_32 = (uint32_t *) DeviceHashArray; \
    __shared__ char charset[512]; \
    __shared__ __align__(16) unsigned char sharedBitmap[8192]; \
    copySingleCharsetToShared(charset, NTLM_Regenerate_Device_Charset_Constant); \
    copyBitmap(sharedBitmap); \
    password_index = ((blockIdx.x * blockDim.x + threadIdx.x) + (PasswordSpaceOffset * NTLM_Regenerate_Device_Number_Of_Threads)); \
    if (password_index >= NTLM_Regenerate_Device_Number_Of_Chains_To_Regen) { \
        return; \
    } \
    clearB0toB15(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15); \
    LoadNTLMRegistersFromGlobalMemory(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,b11,b12,b13,b14,b15, \
        InitialArray32, NTLM_Regenerate_Device_Number_Of_Chains_To_Regen, password_index, pass_length); \
    for (PassCount = 0; PassCount < StepsToRun; PassCount++) { \
        CurrentStep = PassCount + StartChainIndex; \
        padMDHash(pass_length * 2, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15); \
        CUDA_MD4(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15, a, b, c, d); \
        if ((sharedBitmap[(a & 0x0000ffff) >> 3] >> (a & 0x00000007)) & 0x00000001) { \
            { \
                search_high = NTLM_Regenerate_Device_Number_Of_Hashes; \
                search_low = 0; \
                search_index = 0; \
                while (search_low < search_high) { \
                    search_index = search_low + (search_high - search_low) / 2; \
                    temp = DEVICE_Hashes_32[4 * search_index]; \
                    hash_order_mem = (temp & 0xff) << 24 | ((temp >> 8) & 0xff) << 16 | ((temp >> 16) & 0xff) << 8 | ((temp >> 24) & 0xff); \
                    hash_order_a = (a & 0xff) << 24 | ((a >> 8) & 0xff) << 16 | ((a >> 16) & 0xff) << 8 | ((a >> 24) & 0xff); \
                    if (hash_order_mem < hash_order_a) { \
                        search_low = search_index + 1; \
                    } else { \
                        search_high = search_index; \
                    } \
                    if ((hash_order_a == hash_order_mem) && (search_low < NTLM_Regenerate_Device_Number_Of_Hashes)) { \
                        break; \
                    } \
                } \
                if (hash_order_a != hash_order_mem) { \
                    goto next; \
                } \
                while (search_index && (a == DEVICE_Hashes_32[(search_index - 1) * 4])) { \
                    search_index--; \
                } \
                while ((a == DEVICE_Hashes_32[search_index * 4])) { \
                    { \
                        if (b == DEVICE_Hashes_32[search_index * 4 + 1]) { \
                            if (c == DEVICE_Hashes_32[search_index * 4 + 2]) { \
                                if (d == DEVICE_Hashes_32[search_index * 4 + 3]) { \
                                    if (pass_length >= 1) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 0] = (b0 >> 0) & 0xff; \
                                    if (pass_length >= 2) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 1] = (b0 >> 16) & 0xff; \
                                    if (pass_length >= 3) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 2] = (b1 >> 0) & 0xff; \
                                    if (pass_length >= 4) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 3] = (b1 >> 16) & 0xff; \
                                    if (pass_length >= 5) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 4] = (b2 >> 0) & 0xff; \
                                    if (pass_length >= 6) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 5] = (b2 >> 16) & 0xff; \
                                    if (pass_length >= 7) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 6] = (b3 >> 0) & 0xff; \
                                    if (pass_length >= 8) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 7] = (b3 >> 16) & 0xff; \
                                    if (pass_length >= 9) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 8] = (b4 >> 0) & 0xff; \
                                    if (pass_length >= 10) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 9] = (b4 >> 16) & 0xff; \
                                    if (pass_length >= 11) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 10] = (b5 >> 0) & 0xff; \
                                    if (pass_length >= 12) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 11] = (b5 >> 16) & 0xff; \
                                    if (pass_length >= 13) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 12] = (b6 >> 0) & 0xff; \
                                    if (pass_length >= 14) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 13] = (b6 >> 16) & 0xff; \
                                    if (pass_length >= 15) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 14] = (b7 >> 0) & 0xff; \
                                    if (pass_length >= 16) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 15] = (b7 >> 16) & 0xff; \
                                    successArray[search_index] = (unsigned char) 1; \
                                } \
                            } \
                        } \
                    } \
                    search_index++; \
                } \
            } \
        } \
next: \
        reduceSingleCharsetNTLM(b0, b1, b2, b3, b4, a, b, c, d, CurrentStep, charset, charset_offset, pass_length, NTLM_Regenerate_Device_Table_Index); \
        charset_offset++; \
        if (charset_offset >= NTLM_Regenerate_Device_Charset_Length) { \
            charset_offset = 0; \
        } \
    } \
    if (CurrentStep >= (NTLM_Regenerate_Device_Chain_Length - 1)) { \
    } \
    else { \
        SaveNTLMRegistersIntoGlobalMemory(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,b11,b12,b13,b14,b15, \
            InitialArray32, NTLM_Regenerate_Device_Number_Of_Chains_To_Regen, password_index, pass_length); \
    } \
}

CREATE_NTLM_REGEN_KERNEL(6);
CREATE_NTLM_REGEN_KERNEL(7);
CREATE_NTLM_REGEN_KERNEL(8);
CREATE_NTLM_REGEN_KERNEL(9);
CREATE_NTLM_REGEN_KERNEL(10);

extern "C" void LaunchNTLMRegenerateKernel(int PasswordLength, int CUDA_Blocks, int CUDA_Threads,
        unsigned char *InitialPasswordArray, unsigned char *FoundPasswordArray,
        unsigned char *DeviceHashArray, uint32_t PasswordSpaceOffset, uint32_t StartChainIndex,
        uint32_t StepsToRun, uint32_t charset_offset, unsigned char *successArray) {
    switch (PasswordLength) {
        case 6:
            //printf("Launching len6 kernel\n");
            hipLaunchKernelGGL((RegenNTLMChainLen6), dim3(CUDA_Blocks), dim3(CUDA_Threads), 0, 0,
                InitialPasswordArray, FoundPasswordArray, DeviceHashArray,
                PasswordSpaceOffset, StartChainIndex, StepsToRun, charset_offset, successArray);
            break;
        case 7:
            hipLaunchKernelGGL((RegenNTLMChainLen7), dim3(CUDA_Blocks), dim3(CUDA_Threads), 0, 0,
                InitialPasswordArray, FoundPasswordArray, DeviceHashArray,
                PasswordSpaceOffset, StartChainIndex, StepsToRun, charset_offset, successArray);
            break;
        case 8:
            hipLaunchKernelGGL((RegenNTLMChainLen8), dim3(CUDA_Blocks), dim3(CUDA_Threads), 0, 0,
                InitialPasswordArray, FoundPasswordArray, DeviceHashArray,
                PasswordSpaceOffset, StartChainIndex, StepsToRun, charset_offset, successArray);
            break;
        case 9:
            hipLaunchKernelGGL((RegenNTLMChainLen9), dim3(CUDA_Blocks), dim3(CUDA_Threads), 0, 0,
                InitialPasswordArray, FoundPasswordArray, DeviceHashArray,
                PasswordSpaceOffset, StartChainIndex, StepsToRun, charset_offset, successArray);
            break;
        case 10:
            hipLaunchKernelGGL((RegenNTLMChainLen10), dim3(CUDA_Blocks), dim3(CUDA_Threads), 0, 0,
                InitialPasswordArray, FoundPasswordArray, DeviceHashArray,
                PasswordSpaceOffset, StartChainIndex, StepsToRun, charset_offset, successArray);
            break;
        default:
            printf("Password length %d not supported!", PasswordLength);
            exit(1);
    }
    hipDeviceSynchronize();
    hipError_t err = hipGetLastError();
    if (hipSuccess != err) {
        printf("Cuda error: %s.\n", hipGetErrorString(err));
        exit(1);
    }
}
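// The kernels above binary-search a byte-sorted hash list: each little-endian
// 32-bit word is swapped to big-endian so that integer comparison matches the
// byte order the list was sorted in. A host-side sketch of the same key
// transform and lower-bound search (byteswap32 and findFirstMatch are
// illustrative names, not from the original):
/*
#include <stdint.h>

static inline uint32_t byteswap32(uint32_t v) {
    return (v & 0xff) << 24 | ((v >> 8) & 0xff) << 16 |
           ((v >> 16) & 0xff) << 8 | ((v >> 24) & 0xff);
}

// hashes holds 4 words per hash, sorted by byte; returns the first index
// whose leading word equals a, or count if there is no match.
static uint32_t findFirstMatch(const uint32_t *hashes, uint32_t count, uint32_t a) {
    uint32_t lo = 0, hi = count, key = byteswap32(a);
    while (lo < hi) {
        uint32_t mid = lo + (hi - lo) / 2;
        if (byteswap32(hashes[4 * mid]) < key) lo = mid + 1;
        else hi = mid;
    }
    return (lo < count && hashes[4 * lo] == a) ? lo : count;
}
*/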
b08c84e8824c004daaeac6cb6f8ce49a055d6d70.cu
// This is here so Netbeans doesn't error-spam my IDE
#if !defined(__CUDACC__)
// define the keywords, so that the IDE does not complain about them
#define __global__
#define __device__
#define __shared__
#define __constant__
#define blockIdx.x 1
#define blockDim.x 1
#define threadIdx.x 1
#endif

#include <cuda.h>
#include <cuda_runtime_api.h>

#include "GRT_Common/GRTCommon.h"
#include "CUDA_Common/CUDA_SAFE_CALL.h"

// These will be the same for all GPUs working on a hash.
__device__ __constant__ unsigned char NTLM_Regenerate_Device_Charset_Constant[512]; // Constant space for charset
__device__ __constant__ uint32_t NTLM_Regenerate_Device_Charset_Length; // Character set length
__device__ __constant__ uint32_t NTLM_Regenerate_Device_Chain_Length; // May as well pull it from constant memory... faster when cached.
__device__ __constant__ uint32_t NTLM_Regenerate_Device_Table_Index;
__device__ __constant__ uint32_t NTLM_Regenerate_Device_Number_Of_Threads; // It needs this, and can't easily calculate it
__device__ __constant__ uint32_t NTLM_Regenerate_Device_Number_Of_Chains_To_Regen;
__device__ __constant__ uint32_t NTLM_Regenerate_Device_Number_Of_Hashes;
__device__ __constant__ unsigned char NTLM_Regenerate_constantBitmap[8192]; // for lookups

#include "../../inc/CUDA_Common/CUDA_MD4.h"
#include "../../inc/CUDA_Common/Hash_Common.h"
#include "../../inc/GRT_CUDA_device/CUDA_Reduction_Functions.h"
#include "../../inc/GRT_CUDA_device/CUDA_Load_Store_Registers.h"

// Copy the shared variables to the host
extern "C" void copyNTLMRegenerateDataToConstant(char *hostCharset, uint32_t hostCharsetLength,
        uint32_t hostChainLength, uint32_t hostTableIndex, uint32_t hostNumberOfThreads,
        unsigned char *hostBitmap, uint32_t hostNumberOfHashes) {
    CH_CUDA_SAFE_CALL(cudaMemcpyToSymbol(NTLM_Regenerate_Device_Charset_Constant, hostCharset, 512));
    CH_CUDA_SAFE_CALL(cudaMemcpyToSymbol(NTLM_Regenerate_Device_Charset_Length, &hostCharsetLength, sizeof(uint32_t)));
    CH_CUDA_SAFE_CALL(cudaMemcpyToSymbol(NTLM_Regenerate_Device_Chain_Length, &hostChainLength, sizeof(uint32_t)));
    CH_CUDA_SAFE_CALL(cudaMemcpyToSymbol(NTLM_Regenerate_Device_Table_Index, &hostTableIndex, sizeof(uint32_t)));
    CH_CUDA_SAFE_CALL(cudaMemcpyToSymbol(NTLM_Regenerate_Device_Number_Of_Threads, &hostNumberOfThreads, sizeof(uint32_t)));
    CH_CUDA_SAFE_CALL(cudaMemcpyToSymbol(NTLM_Regenerate_constantBitmap, hostBitmap, 8192));
    CH_CUDA_SAFE_CALL(cudaMemcpyToSymbol(NTLM_Regenerate_Device_Number_Of_Hashes, &hostNumberOfHashes, sizeof(uint32_t)));
}

extern "C" void setNTLMRegenerateNumberOfChains(uint32_t numberOfChains) {
    CH_CUDA_SAFE_CALL(cudaMemcpyToSymbol(NTLM_Regenerate_Device_Number_Of_Chains_To_Regen, &numberOfChains, sizeof(uint32_t)));
}

__device__ inline void copyBitmap(unsigned char *sharedBitmap) {
    uint64_t *sharedBitmapCoalesce = (uint64_t *)sharedBitmap;
    uint64_t *deviceBitmapCoalesce = (uint64_t *)NTLM_Regenerate_constantBitmap;
    int a;
    if (threadIdx.x == 0) {
        for (a = 0; a < (8192 / 8); a++) {
            sharedBitmapCoalesce[a] = deviceBitmapCoalesce[a];
        }
    }
    // Make sure everyone is here and done before we return.
    __syncthreads();
}

/*
__global__ void RegenNTLMChainLen7(unsigned char *InitialPasswordArray, unsigned char *FoundPasswordArray,
        unsigned char *DeviceHashArray, uint32_t PasswordSpaceOffset, uint32_t StartChainIndex,
        uint32_t StepsToRun, uint32_t charset_offset, unsigned char *successArray) {

    // Needed variables for generation
    uint32_t CurrentStep, PassCount, password_index;
    const int password_length = 7;

    // Hash variables
    uint32_t b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15;
    uint32_t a, b, c, d;

    // Word-width access to the arrays
    uint32_t *InitialArray32;
    uint32_t *FoundPassword32;
    uint32_t *DEVICE_Hashes_32;
    uint32_t search_index, search_high, search_low, hash_order_a, hash_order_mem, temp;

    // 32-bit accesses to the hash arrays
    InitialArray32 = (uint32_t *) InitialPasswordArray;
    FoundPassword32 = (uint32_t *) FoundPasswordArray;
    DEVICE_Hashes_32 = (uint32_t *) DeviceHashArray;

    __shared__ char charset[512];
    __shared__ __align__(16) unsigned char sharedBitmap[8192];

    // Generic "copy charset to shared memory" function
    copySingleCharsetToShared(charset, NTLM_Regenerate_Device_Charset_Constant);
    copyBitmap(sharedBitmap);

    // Figure out which password we are working on.
    password_index = ((blockIdx.x * blockDim.x + threadIdx.x) + (PasswordSpaceOffset * NTLM_Regenerate_Device_Number_Of_Threads));

    // Return if this thread is working on something beyond the end of the password space
    if (password_index >= NTLM_Regenerate_Device_Number_Of_Chains_To_Regen) {
        return;
    }

    clearB0toB15(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15);

    // Load b0/b1 out of memory
    b0 = (uint32_t) InitialArray32[0 * NTLM_Regenerate_Device_Number_Of_Chains_To_Regen + password_index];
    b1 = (uint32_t) InitialArray32[1 * NTLM_Regenerate_Device_Number_Of_Chains_To_Regen + password_index];

    // Set up the padding/size.
    for (PassCount = 0; PassCount < StepsToRun; PassCount++) {
        CurrentStep = PassCount + StartChainIndex;
        padMDHash(password_length * 2, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15);
        CUDA_MD4(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15, a, b, c, d);
        if ((sharedBitmap[(a & 0x0000ffff) >> 3] >> (a & 0x00000007)) & 0x00000001) {
            {
                //printf("Bitmap hit.\n");
                // Init binary search through global password space
                //printf("NTLM_Regenerate_Device_Number_Of_hashes: %d\n", NTLM_Regenerate_Device_Number_Of_Hashes);
                search_high = NTLM_Regenerate_Device_Number_Of_Hashes;
                search_low = 0;
                search_index = 0;
                while (search_low < search_high) {
                    //printf("Search_low: %d\n", search_low);
                    //printf("Search_high: %d\n", search_high);
                    // Midpoint between search_high and search_low
                    search_index = search_low + (search_high - search_low) / 2;
                    //printf("Search_index: %d\n", search_index);
                    // reorder from low endian to big endian for searching, as hashes are sorted by byte.
                    temp = DEVICE_Hashes_32[4 * search_index];
                    hash_order_mem = (temp & 0xff) << 24 | ((temp >> 8) & 0xff) << 16 | ((temp >> 16) & 0xff) << 8 | ((temp >> 24) & 0xff);
                    hash_order_a = (a & 0xff) << 24 | ((a >> 8) & 0xff) << 16 | ((a >> 16) & 0xff) << 8 | ((a >> 24) & 0xff);
                    // Adjust search_high & search_low to work through space
                    if (hash_order_mem < hash_order_a) {
                        search_low = search_index + 1;
                    } else {
                        search_high = search_index;
                    }
                    if ((hash_order_a == hash_order_mem) && (search_low < NTLM_Regenerate_Device_Number_Of_Hashes)) {
                        // Break out of the search loop - search_index is on a value
                        break;
                    }
                }
                // Yes - it's a goto. And it speeds up performance significantly (15%).
                // It stays. These values are already loaded. If they are not the same,
                // there is NO point to touching global memory again.
                if (hash_order_a != hash_order_mem) {
                    goto next;
                }
                // We've broken out of the loop, search_index should be on a matching value
                // Loop while the search index is the same - linear search through this to find all possible
                // matching passwords.
                // We first need to move backwards to the beginning, as we may be in the middle of a set of matching hashes.
                // If we are index 0, do NOT subtract, as we will wrap and this goes poorly.
                while (search_index && (a == DEVICE_Hashes_32[(search_index - 1) * 4])) {
                    search_index--;
                }
                //printf("Got search index of %d\n", search_index);
                while ((a == DEVICE_Hashes_32[search_index * 4])) {
                    {
                        if (b == DEVICE_Hashes_32[search_index * 4 + 1]) {
                            if (c == DEVICE_Hashes_32[search_index * 4 + 2]) {
                                if (d == DEVICE_Hashes_32[search_index * 4 + 3]) {
                                    if (password_length >= 1) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 0] = (b0 >> 0) & 0xff;
                                    if (password_length >= 2) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 1] = (b0 >> 8) & 0xff;
                                    if (password_length >= 3) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 2] = (b0 >> 16) & 0xff;
                                    if (password_length >= 4) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 3] = (b0 >> 24) & 0xff;
                                    if (password_length >= 5) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 4] = (b1 >> 0) & 0xff;
                                    if (password_length >= 6) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 5] = (b1 >> 8) & 0xff;
                                    if (password_length >= 7) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 6] = (b1 >> 16) & 0xff;
                                    if (password_length >= 8) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 7] = deviceCharset[p7 + (MAX_CHARSET_LENGTH * 7)];
                                    if (password_length >= 9) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 8] = deviceCharset[p8 + (MAX_CHARSET_LENGTH * 8)];
                                    if (password_length >= 10) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 9] = deviceCharset[p9 + (MAX_CHARSET_LENGTH * 9)];
                                    if (password_length >= 11) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 10] = deviceCharset[p10 + (MAX_CHARSET_LENGTH * 10)];
                                    if (password_length >= 12) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 11] = deviceCharset[p11 + (MAX_CHARSET_LENGTH * 11)];
                                    if (password_length >= 13) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 12] = deviceCharset[p12 + (MAX_CHARSET_LENGTH * 12)];
                                    if (password_length >= 14) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 13] = deviceCharset[p13 + (MAX_CHARSET_LENGTH * 13)];
                                    if (password_length >= 15) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 14] = deviceCharset[p14 + (MAX_CHARSET_LENGTH * 14)];
                                    if (password_length >= 16) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 15] = deviceCharset[p15 + (MAX_CHARSET_LENGTH * 15)];
                                    successArray[search_index] = (unsigned char) 1;
                                    printf("FOUND PASSWORD:");
                                    for (int i = 0; i < password_length; i++) {
                                        printf("%c", FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + i]);
                                    }
                                    printf("\n");
                                }
                            }
                        }
                    }
                    search_index++;
                }
            }
        }
        // This is where the goto goes. Notice the skipping of all the global memory access.
next:
        reduceSingleCharsetNTLM(b0, b1, b2, b3, b4, a, b, c, d, CurrentStep, charset, charset_offset, password_length, NTLM_Regenerate_Device_Table_Index);
        charset_offset++;
        if (charset_offset >= NTLM_Regenerate_Device_Charset_Length) {
            charset_offset = 0;
        }
    }
    // Done with the number of steps we need to run
    // If we are done (or have somehow overflowed), store the result
    if (CurrentStep >= (NTLM_Regenerate_Device_Chain_Length - 1)) {
        // Do nothing.
    }
    // Else, store the b0/b1 values back to the initial array for the next loop
    else {
        InitialArray32[0 * NTLM_Regenerate_Device_Number_Of_Chains_To_Regen + password_index] = b0;
        InitialArray32[1 * NTLM_Regenerate_Device_Number_Of_Chains_To_Regen + password_index] = b1;
    }
}
*/

#define CREATE_NTLM_REGEN_KERNEL(length) \
__global__ void RegenNTLMChainLen##length(unsigned char *InitialPasswordArray, unsigned char *FoundPasswordArray, \
        unsigned char *DeviceHashArray, uint32_t PasswordSpaceOffset, uint32_t StartChainIndex, \
        uint32_t StepsToRun, uint32_t charset_offset, unsigned char *successArray) { \
    uint32_t CurrentStep, PassCount, password_index; \
    const int pass_length = length; \
    uint32_t b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15; \
    uint32_t a, b, c, d; \
    uint32_t *InitialArray32; \
    uint32_t *DEVICE_Hashes_32; \
    uint32_t search_index, search_high, search_low, hash_order_a, hash_order_mem, temp; \
    InitialArray32 = (uint32_t *) InitialPasswordArray; \
    DEVICE_Hashes_32 = (uint32_t *) DeviceHashArray; \
    __shared__ char charset[512]; \
    __shared__ __align__(16) unsigned char sharedBitmap[8192]; \
    copySingleCharsetToShared(charset, NTLM_Regenerate_Device_Charset_Constant); \
    copyBitmap(sharedBitmap); \
    password_index = ((blockIdx.x * blockDim.x + threadIdx.x) + (PasswordSpaceOffset * NTLM_Regenerate_Device_Number_Of_Threads)); \
    if (password_index >= NTLM_Regenerate_Device_Number_Of_Chains_To_Regen) { \
        return; \
    } \
    clearB0toB15(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15); \
    LoadNTLMRegistersFromGlobalMemory(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,b11,b12,b13,b14,b15, \
        InitialArray32, NTLM_Regenerate_Device_Number_Of_Chains_To_Regen, password_index, pass_length); \
    for (PassCount = 0; PassCount < StepsToRun; PassCount++) { \
        CurrentStep = PassCount + StartChainIndex; \
        padMDHash(pass_length * 2, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15); \
        CUDA_MD4(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15, a, b, c, d); \
        if ((sharedBitmap[(a & 0x0000ffff) >> 3] >> (a & 0x00000007)) & 0x00000001) { \
            { \
                search_high = NTLM_Regenerate_Device_Number_Of_Hashes; \
                search_low = 0; \
                search_index = 0; \
                while (search_low < search_high) { \
                    search_index = search_low + (search_high - search_low) / 2; \
                    temp = DEVICE_Hashes_32[4 * search_index]; \
                    hash_order_mem = (temp & 0xff) << 24 | ((temp >> 8) & 0xff) << 16 | ((temp >> 16) & 0xff) << 8 | ((temp >> 24) & 0xff); \
                    hash_order_a = (a & 0xff) << 24 | ((a >> 8) & 0xff) << 16 | ((a >> 16) & 0xff) << 8 | ((a >> 24) & 0xff); \
                    if (hash_order_mem < hash_order_a) { \
                        search_low = search_index + 1; \
                    } else { \
                        search_high = search_index; \
                    } \
                    if ((hash_order_a == hash_order_mem) && (search_low < NTLM_Regenerate_Device_Number_Of_Hashes)) { \
                        break; \
                    } \
                } \
                if (hash_order_a != hash_order_mem) { \
                    goto next; \
                } \
                while (search_index && (a == DEVICE_Hashes_32[(search_index - 1) * 4])) { \
                    search_index--; \
                } \
                while ((a == DEVICE_Hashes_32[search_index * 4])) { \
                    { \
                        if (b == DEVICE_Hashes_32[search_index * 4 + 1]) { \
                            if (c == DEVICE_Hashes_32[search_index * 4 + 2]) { \
                                if (d == DEVICE_Hashes_32[search_index * 4 + 3]) { \
                                    if (pass_length >= 1) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 0] = (b0 >> 0) & 0xff; \
                                    if (pass_length >= 2) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 1] = (b0 >> 16) & 0xff; \
                                    if (pass_length >= 3) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 2] = (b1 >> 0) & 0xff; \
                                    if (pass_length >= 4) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 3] = (b1 >> 16) & 0xff; \
                                    if (pass_length >= 5) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 4] = (b2 >> 0) & 0xff; \
                                    if (pass_length >= 6) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 5] = (b2 >> 16) & 0xff; \
                                    if (pass_length >= 7) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 6] = (b3 >> 0) & 0xff; \
                                    if (pass_length >= 8) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 7] = (b3 >> 16) & 0xff; \
                                    if (pass_length >= 9) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 8] = (b4 >> 0) & 0xff; \
                                    if (pass_length >= 10) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 9] = (b4 >> 16) & 0xff; \
                                    if (pass_length >= 11) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 10] = (b5 >> 0) & 0xff; \
                                    if (pass_length >= 12) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 11] = (b5 >> 16) & 0xff; \
                                    if (pass_length >= 13) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 12] = (b6 >> 0) & 0xff; \
                                    if (pass_length >= 14) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 13] = (b6 >> 16) & 0xff; \
                                    if (pass_length >= 15) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 14] = (b7 >> 0) & 0xff; \
                                    if (pass_length >= 16) FoundPasswordArray[search_index * MAX_PASSWORD_LENGTH + 15] = (b7 >> 16) & 0xff; \
                                    successArray[search_index] = (unsigned char) 1; \
                                } \
                            } \
                        } \
                    } \
                    search_index++; \
                } \
            } \
        } \
next: \
        reduceSingleCharsetNTLM(b0, b1, b2, b3, b4, a, b, c, d, CurrentStep, charset, charset_offset, pass_length, NTLM_Regenerate_Device_Table_Index); \
        charset_offset++; \
        if (charset_offset >= NTLM_Regenerate_Device_Charset_Length) { \
            charset_offset = 0; \
        } \
    } \
    if (CurrentStep >= (NTLM_Regenerate_Device_Chain_Length - 1)) { \
    } \
    else { \
        SaveNTLMRegistersIntoGlobalMemory(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,b11,b12,b13,b14,b15, \
            InitialArray32, NTLM_Regenerate_Device_Number_Of_Chains_To_Regen, password_index, pass_length); \
    } \
}

CREATE_NTLM_REGEN_KERNEL(6);
CREATE_NTLM_REGEN_KERNEL(7);
CREATE_NTLM_REGEN_KERNEL(8);
CREATE_NTLM_REGEN_KERNEL(9);
CREATE_NTLM_REGEN_KERNEL(10);

extern "C" void LaunchNTLMRegenerateKernel(int PasswordLength, int CUDA_Blocks, int CUDA_Threads,
        unsigned char *InitialPasswordArray, unsigned char *FoundPasswordArray,
        unsigned char *DeviceHashArray, uint32_t PasswordSpaceOffset, uint32_t StartChainIndex,
        uint32_t StepsToRun, uint32_t charset_offset, unsigned char *successArray) {
    switch (PasswordLength) {
        case 6:
            //printf("Launching len6 kernel\n");
            RegenNTLMChainLen6 <<< CUDA_Blocks, CUDA_Threads >>> (InitialPasswordArray, FoundPasswordArray,
                DeviceHashArray, PasswordSpaceOffset, StartChainIndex, StepsToRun, charset_offset, successArray);
            break;
        case 7:
            RegenNTLMChainLen7 <<< CUDA_Blocks, CUDA_Threads >>> (InitialPasswordArray, FoundPasswordArray,
                DeviceHashArray, PasswordSpaceOffset, StartChainIndex, StepsToRun, charset_offset, successArray);
            break;
        case 8:
            RegenNTLMChainLen8 <<< CUDA_Blocks, CUDA_Threads >>> (InitialPasswordArray, FoundPasswordArray,
                DeviceHashArray, PasswordSpaceOffset, StartChainIndex, StepsToRun, charset_offset, successArray);
            break;
        case 9:
            RegenNTLMChainLen9 <<< CUDA_Blocks, CUDA_Threads >>> (InitialPasswordArray, FoundPasswordArray,
                DeviceHashArray, PasswordSpaceOffset, StartChainIndex, StepsToRun, charset_offset, successArray);
            break;
        case 10:
            RegenNTLMChainLen10 <<< CUDA_Blocks, CUDA_Threads >>> (InitialPasswordArray, FoundPasswordArray,
                DeviceHashArray, PasswordSpaceOffset, StartChainIndex, StepsToRun, charset_offset, successArray);
            break;
        default:
            printf("Password length %d not supported!", PasswordLength);
            exit(1);
    }
    cudaThreadSynchronize();
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess != err) {
        printf("Cuda error: %s.\n", cudaGetErrorString(err));
        exit(1);
    }
}
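// cudaThreadSynchronize() above is the legacy API name; hipify's
// hipDeviceSynchronize() in the .hip version corresponds to the modern
// cudaDeviceSynchronize(). The launch-then-check tail of
// LaunchNTLMRegenerateKernel, factored as a helper (sketch; the function
// name is illustrative, not from the original):
/*
static void syncAndCheckLaunch(void) {
    cudaDeviceSynchronize();               // drain the kernel just launched
    cudaError_t err = cudaGetLastError();  // pick up launch/runtime errors
    if (cudaSuccess != err) {
        printf("Cuda error: %s.\n", cudaGetErrorString(err));
        exit(1);
    }
}
*/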
520164a70fbdc5b1bc6f75fa1408ddbf151b3234.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Modification of Ingemar Ragnemalm "Real Hello World!" program
// To compile execute below:
// nvcc hello-world.cu -L /usr/local/cuda/lib -lcudart -o hello-world

#include <stdio.h>
#include <stdlib.h> // for EXIT_SUCCESS

// N = data size...
// Changing this will increase array size and will change number of blocks
#define N 128
#define BLOCK_SIZE 32
#define NUM_BLOCKS N/BLOCK_SIZE

#define ARRAY_SIZE N
#define ARRAY_SIZE_IN_BYTES (sizeof(unsigned int) * (ARRAY_SIZE))

/* Declare statically four arrays of ARRAY_SIZE each */
unsigned int cpu_block[ARRAY_SIZE];

__global__ void hello(int * block)
{
    const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    block[thread_idx] = threadIdx.x;
}

void main_sub()
{
    /* Declare pointers for GPU based params */
    int *gpu_block;

    hipMalloc((void **)&gpu_block, ARRAY_SIZE_IN_BYTES);
    /* Copy the host data to the device */
    hipMemcpy( gpu_block, cpu_block, ARRAY_SIZE_IN_BYTES, hipMemcpyHostToDevice );

    /* Execute our kernel */
    hipLaunchKernelGGL((hello), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, gpu_block);

    /* Copy the results back, then free the arrays on the GPU as now we're done with them */
    hipMemcpy( cpu_block, gpu_block, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost );
    hipFree(gpu_block);

    /* Iterate through the arrays and print */
    for(unsigned int i = 0; i < ARRAY_SIZE; i++)
    {
        printf("Calculated Thread: - Block: %2u\n", cpu_block[i]);
    }
}

int main()
{
    main_sub();
    return EXIT_SUCCESS;
}
520164a70fbdc5b1bc6f75fa1408ddbf151b3234.cu
// Modification of Ingemar Ragnemalm "Real Hello World!" program
// To compile execute below:
// nvcc hello-world.cu -L /usr/local/cuda/lib -lcudart -o hello-world

#include <stdio.h>
#include <stdlib.h> // for EXIT_SUCCESS

// N = data size...
// Changing this will increase array size and will change number of blocks
#define N 128
#define BLOCK_SIZE 32
#define NUM_BLOCKS N/BLOCK_SIZE

#define ARRAY_SIZE N
#define ARRAY_SIZE_IN_BYTES (sizeof(unsigned int) * (ARRAY_SIZE))

/* Declare statically four arrays of ARRAY_SIZE each */
unsigned int cpu_block[ARRAY_SIZE];

__global__ void hello(int * block)
{
    const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    block[thread_idx] = threadIdx.x;
}

void main_sub()
{
    /* Declare pointers for GPU based params */
    int *gpu_block;

    cudaMalloc((void **)&gpu_block, ARRAY_SIZE_IN_BYTES);
    /* Copy the host data to the device */
    cudaMemcpy( gpu_block, cpu_block, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice );

    /* Execute our kernel */
    hello<<<NUM_BLOCKS, BLOCK_SIZE>>>(gpu_block);

    /* Copy the results back, then free the arrays on the GPU as now we're done with them */
    cudaMemcpy( cpu_block, gpu_block, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost );
    cudaFree(gpu_block);

    /* Iterate through the arrays and print */
    for(unsigned int i = 0; i < ARRAY_SIZE; i++)
    {
        printf("Calculated Thread: - Block: %2u\n", cpu_block[i]);
    }
}

int main()
{
    main_sub();
    return EXIT_SUCCESS;
}
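// With N=128 and BLOCK_SIZE=32 the kernel stores threadIdx.x, so the printout
// cycles 0..31 four times. A variant that also records which block wrote each
// slot (hypothetical kernel, not part of the original program):
/*
__global__ void hello2(int *thread_ids, int *block_ids)
{
    const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    thread_ids[i] = threadIdx.x; // lane within the block: 0..31
    block_ids[i]  = blockIdx.x;  // which of the NUM_BLOCKS blocks wrote this slot
}
*/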
e3143cf34b290bea68f4616822968a23c92629a3.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <bits/stdc++.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>

#define to_ptr(x) thrust::raw_pointer_cast(&x[0])
#define gpu_copy(x, y) thrust::copy((x).begin(), (x).end(), (y).begin())
#define gpu_copy_to(x, y, pos) thrust::copy((x).begin(), (x).end(), (y).begin() + (pos))
#define def_dvec(t) thrust::device_vector<t>

using namespace std;

const int BLOCK_SIZE = 256;
const int VEC_SIZE = 12248;

__global__ void init(){}

__device__ float dothings(int t, int sz, float *input){
    float ans = 0;
    for(int i=0;i<12;++i){
        ans += input[(i+t)%sz];
    }
    return ans;
}

__global__ void process(int N_step, int N_inst, float *input, float *output){
    int g_id = blockIdx.x * blockDim.x + threadIdx.x;
    if(g_id >= N_inst) return;
    float local_data[VEC_SIZE];
    float ans = 0.;
    for(int i=0;i<VEC_SIZE;++i)
        local_data[i] = input[VEC_SIZE * g_id + i];
    for(int t=0;t<N_step;++t){
        ans += dothings(t, VEC_SIZE, local_data);
    }
    output[g_id] = ans;
    return;
}

int main(int argc, char *argv[]){
    srand(0);
    int num_inst = 1024, num_step = 1024;
    if(argc > 1) num_step = stoi(argv[1]);

    /* For measuring the time */
    hipEvent_t start, stop;
    float cuda_time;
    hipEventCreate(&start); // creating the event 1
    hipEventCreate(&stop);  // creating the event 2

    vector<float> hin(VEC_SIZE * num_step), hout(num_inst);
    def_dvec(float) din(VEC_SIZE * num_step), dout(num_inst);
    generate(hin.begin(), hin.end(), [](){return float(rand())/RAND_MAX;});

    int n_block = (num_inst + BLOCK_SIZE - 1)/BLOCK_SIZE;
    hipLaunchKernelGGL((init), dim3(n_block), dim3(BLOCK_SIZE), 0, 0, );
    gpu_copy(hin, din);

    hipEventRecord(start, 0);
    hipLaunchKernelGGL((process), dim3(n_block), dim3(BLOCK_SIZE), 0, 0, num_step, num_inst, to_ptr(din), to_ptr(dout));
    hipEventRecord(stop, 0); // Stop time measuring
    hipEventSynchronize(stop);
    hipEventElapsedTime(&cuda_time, start, stop); // Saving the time measured
    cout<<"Time Usage for running the kernel is: "<<cuda_time/1000<<"s"<<endl;

    gpu_copy(dout, hout);
    cout<<"Showing the answer:"<<endl;
    for(int i=0;i<num_inst;i+=num_inst/10) cout<<hout[i]<<' ';
    cout<<endl;
    return 0;
}
e3143cf34b290bea68f4616822968a23c92629a3.cu
#include <bits/stdc++.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>

#define to_ptr(x) thrust::raw_pointer_cast(&x[0])
#define gpu_copy(x, y) thrust::copy((x).begin(), (x).end(), (y).begin())
#define gpu_copy_to(x, y, pos) thrust::copy((x).begin(), (x).end(), (y).begin() + (pos))
#define def_dvec(t) thrust::device_vector<t>

using namespace std;

const int BLOCK_SIZE = 256;
const int VEC_SIZE = 12248;

__global__ void init(){}

__device__ float dothings(int t, int sz, float *input){
    float ans = 0;
    for(int i=0;i<12;++i){
        ans += input[(i+t)%sz];
    }
    return ans;
}

__global__ void process(int N_step, int N_inst, float *input, float *output){
    int g_id = blockIdx.x * blockDim.x + threadIdx.x;
    if(g_id >= N_inst) return;
    float local_data[VEC_SIZE];
    float ans = 0.;
    for(int i=0;i<VEC_SIZE;++i)
        local_data[i] = input[VEC_SIZE * g_id + i];
    for(int t=0;t<N_step;++t){
        ans += dothings(t, VEC_SIZE, local_data);
    }
    output[g_id] = ans;
    return;
}

int main(int argc, char *argv[]){
    srand(0);
    int num_inst = 1024, num_step = 1024;
    if(argc > 1) num_step = stoi(argv[1]);

    /* For measuring the time */
    cudaEvent_t start, stop;
    float cuda_time;
    cudaEventCreate(&start); // creating the event 1
    cudaEventCreate(&stop);  // creating the event 2

    vector<float> hin(VEC_SIZE * num_step), hout(num_inst);
    def_dvec(float) din(VEC_SIZE * num_step), dout(num_inst);
    generate(hin.begin(), hin.end(), [](){return float(rand())/RAND_MAX;});

    int n_block = (num_inst + BLOCK_SIZE - 1)/BLOCK_SIZE;
    init<<<n_block, BLOCK_SIZE>>>();
    gpu_copy(hin, din);

    cudaEventRecord(start, 0);
    process<<<n_block, BLOCK_SIZE>>>(num_step, num_inst, to_ptr(din), to_ptr(dout));
    cudaEventRecord(stop, 0); // Stop time measuring
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&cuda_time, start, stop); // Saving the time measured
    cout<<"Time Usage for running the kernel is: "<<cuda_time/1000<<"s"<<endl;

    gpu_copy(dout, hout);
    cout<<"Showing the answer:"<<endl;
    for(int i=0;i<num_inst;i+=num_inst/10) cout<<hout[i]<<' ';
    cout<<endl;
    return 0;
}
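// process() declares float local_data[12248] (~48 KB) per thread, far beyond
// the register budget, so the array is backed by local memory (per-thread
// global memory). One way to confirm the spill from host code (fragment to
// run inside main(); cudaFuncGetAttributes is the standard runtime API):
/*
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, process);
printf("local memory per thread: %zu bytes\n", attr.localSizeBytes);
*/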
16a8a1de0f9a539d2e315f672c9e8ad1b266928a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by 王张苏徽 on 2020/11/10.
//

#include "consts.h"
#include "utils.h"
#include "gpu_utils.h"

__constant__ float dev_coeff[R + 1];

__global__ void naive_kernel(const float *src, float *dst, const int NX, const int NY, const int NZ) {
    int gx = blockIdx.x * blockDim.x + threadIdx.x;
    int gy = blockIdx.y * blockDim.y + threadIdx.y;
    int gz = blockIdx.z * blockDim.z + threadIdx.z;
    if (gx >= R && gx < NX - R && gy >= R && gy < NY - R && gz >= R && gz < NZ - R) {
        const int stride_Z = NX * NY;
        const int stride_Y = NX;
        const int goffset = gz * stride_Z + gy * stride_Y + gx;
        float value = dev_coeff[0] * src[goffset];
        //left
        #pragma unroll 4
        for (int ir = 1; ir <= R; ir++) {
            value += dev_coeff[ir] * src[goffset - ir];
        }
        //right
        #pragma unroll 4
        for (int ir = 1; ir <= R; ir++) {
            value += dev_coeff[ir] * src[goffset + ir];
        }
        //front
        #pragma unroll 4
        for (int ir = 1; ir <= R; ir++) {
            value += dev_coeff[ir] * src[goffset - ir * stride_Y];
        }
        //back
        #pragma unroll 4
        for (int ir = 1; ir <= R; ir++) {
            value += dev_coeff[ir] * src[goffset + ir * stride_Y];
        }
        //top
        #pragma unroll 4
        for (int ir = 1; ir <= R; ir++) {
            value += dev_coeff[ir] * src[goffset + ir * stride_Z];
        }
        //down
        #pragma unroll 4
        for (int ir = 1; ir <= R; ir++) {
            value += dev_coeff[ir] * src[goffset - ir * stride_Z];
        }
        dst[goffset] = value;
    }
}
16a8a1de0f9a539d2e315f672c9e8ad1b266928a.cu
//
// Created by 王张苏徽 on 2020/11/10.
//

#include "consts.h"
#include "utils.h"
#include "gpu_utils.h"

__constant__ float dev_coeff[R + 1];

__global__ void naive_kernel(const float *src, float *dst, const int NX, const int NY, const int NZ) {
    int gx = blockIdx.x * blockDim.x + threadIdx.x;
    int gy = blockIdx.y * blockDim.y + threadIdx.y;
    int gz = blockIdx.z * blockDim.z + threadIdx.z;
    if (gx >= R && gx < NX - R && gy >= R && gy < NY - R && gz >= R && gz < NZ - R) {
        const int stride_Z = NX * NY;
        const int stride_Y = NX;
        const int goffset = gz * stride_Z + gy * stride_Y + gx;
        float value = dev_coeff[0] * src[goffset];
        //left
        #pragma unroll 4
        for (int ir = 1; ir <= R; ir++) {
            value += dev_coeff[ir] * src[goffset - ir];
        }
        //right
        #pragma unroll 4
        for (int ir = 1; ir <= R; ir++) {
            value += dev_coeff[ir] * src[goffset + ir];
        }
        //front
        #pragma unroll 4
        for (int ir = 1; ir <= R; ir++) {
            value += dev_coeff[ir] * src[goffset - ir * stride_Y];
        }
        //back
        #pragma unroll 4
        for (int ir = 1; ir <= R; ir++) {
            value += dev_coeff[ir] * src[goffset + ir * stride_Y];
        }
        //top
        #pragma unroll 4
        for (int ir = 1; ir <= R; ir++) {
            value += dev_coeff[ir] * src[goffset + ir * stride_Z];
        }
        //down
        #pragma unroll 4
        for (int ir = 1; ir <= R; ir++) {
            value += dev_coeff[ir] * src[goffset - ir * stride_Z];
        }
        dst[goffset] = value;
    }
}
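// A hypothetical launch for the 3D stencil above (block shape, array names,
// and coefficient setup are illustrative; R and dev_coeff come from this file):
/*
dim3 block(32, 4, 4);
dim3 grid((NX + block.x - 1) / block.x,
          (NY + block.y - 1) / block.y,
          (NZ + block.z - 1) / block.z);
cudaMemcpyToSymbol(dev_coeff, host_coeff, (R + 1) * sizeof(float)); // host_coeff: assumed host array
naive_kernel<<<grid, block>>>(d_src, d_dst, NX, NY, NZ);
*/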
b7be925bbea332f10c2a8ff0f010bddb335f2f38.hip
// !!! This is a file automatically generated by hipify!!!
// General
#include <iostream>
#include <algorithm>
#include <sstream>
#include <assert.h>

// Warpkernel
#include "warpkernel.hpp"

// cusp
#include <cusp/coo_matrix.h>
#include <cusp/io/matrix_market.h>
#include <cusp/csr_matrix.h>
#include <cusp/multiply.h>
#include <cusp/detail/timer.h>
#include <cusp/hyb_matrix.h>

// mgpu
#include "../benchmark.h"

// boost
// stats
#include <boost/accumulators/accumulators.hpp>
#include <boost/accumulators/statistics/stats.hpp>
#include <boost/accumulators/statistics/mean.hpp>
#include <boost/accumulators/statistics/min.hpp>

#define ValueType double
#define IndexType int
#define DeviceSpace cusp::device_memory
#define CPUSpace cusp::host_memory

struct rand_float {
    ValueType operator() () {
        return ((ValueType)(rand() % 100))/100. - 0.3;
    }
};

int main(int argc, char *argv[]) {
    std::string matrixfilename = argv[1];
    int ntests = 1;
    if (argc == 3) ntests = atoi(argv[2]);

    cusp::coo_matrix<IndexType, ValueType, CPUSpace> B;
    cusp::io::read_matrix_market_file(B, matrixfilename.c_str());
    cusp::csr_matrix<IndexType, ValueType, CPUSpace> A = B;

    uint N = A.num_cols;
    uint nz = A.num_entries;

    // open up data file
    std::string filename;
    size_t pos = matrixfilename.find_last_of("/");
    std::string matrixname;
    if (pos != std::string::npos)
        matrixname.assign(matrixfilename.begin()+pos+1, matrixfilename.end());
    else
        matrixname = matrixfilename;

    std::string datapath = "./data/" + matrixname + "_results_cusparsemgpu.txt";
    std::cout << "Starting data file = " << datapath << std::endl;
    std::ofstream datafile(datapath.c_str());
    warpkernel::startDatafile(datafile, nz, N, ntests);

    cusp::array1d<ValueType, CPUSpace> x(N, 0);
    // thrust::generate(x.begin(),x.end(), rand_float());
    cusp::array1d<ValueType, CPUSpace> y(N);

    // setup multiple run mean accumulators
    // find global minimum and maximum
    boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::min> > mgpustats;
    int fastestValuesPerThread = 4;

    bool lastiter = true;

    // cusp multiplication
    {
        boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime;
        cusp::csr_matrix<IndexType, ValueType, DeviceSpace> A1 = A;
        cusp::array1d<ValueType, DeviceSpace> dx = x;
        cusp::array1d<ValueType, DeviceSpace> dy = y;
        for (int t = 0; t < ntests; t++) {
            cusp::detail::timer cusptimer;
            cusptimer.start();
            cusp::multiply(A1, dx, dy);
            ValueType measuredtime = cusptimer.seconds_elapsed();
            statstime(measuredtime);
        }
        y = dy;
        if (lastiter) {
            std::cout << "cusp gpu time " << std::scientific << boost::accumulators::mean(statstime) << std::endl;
            warpkernel::addData(datafile, "cusp-csr", boost::accumulators::mean(statstime), -1, -1, -1, -1);
        }
    }

    // Modern GPU Benchmarks
    {
        cusp::array1d<ValueType, DeviceSpace> dx = x;
        sparseEngine_t mgpu;
        sparseStatus_t status = sparseCreate("/home/jonathan/mgpu/sparse/src/cubin/", &mgpu);
        if (SPARSE_STATUS_SUCCESS != status) {
            printf("Could not create MGPU Sparse object: %s.\n", sparseStatusString(status));
            return 0;
        }

        std::auto_ptr<SparseMatrix<double> > m;
        std::string err;
        bool success = ReadSparseMatrix(matrixfilename.c_str(), SparseOrderRow, &m, err);
        std::vector<int> rowIndices, colIndices;
        std::vector<double> sparseValues;
        DeinterleaveMatrix(*m, rowIndices, colIndices, sparseValues);

        int threads[] = {4, 6, 8, 10, 12, 16};
        for (int vals = 0; vals < 6; vals++) {
            int valuesPerThread = threads[vals];
            sparseMat_t mat = 0;
            status = sparseMatCreate(mgpu, m->height, m->width, SPARSE_PREC_REAL8, valuesPerThread,
                                     SPARSE_INPUT_CSR, (int)m->elements.size(),
                                     &(A.values[0]), &(A.row_offsets[0]), &(A.column_indices[0]), &mat);
            if (SPARSE_STATUS_SUCCESS != status) return status;

            cusp::array1d<ValueType, DeviceSpace> dy(N);
            boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime;
            for (int i = 0; i < ntests; i++) {
                cusp::detail::timer mgputime;
                mgputime.start();
                status = sparseMatVecDMul(mgpu, 1.0, mat,
                                          (hipDeviceptr_t) thrust::raw_pointer_cast(&dx[0]), 0.0,
                                          (hipDeviceptr_t) thrust::raw_pointer_cast(&dy[0]));
                ValueType measuretime = mgputime.seconds_elapsed();
                statstime(measuretime);
            }
            std::cout << "mgpu time" << valuesPerThread << "\t" << boost::accumulators::mean(statstime) << " s " << " status :" << status << std::endl;

            cusp::array1d<ValueType, CPUSpace> ycheck = dy;
            bool check = true;
            for (int i = 0; i < N; i++) {
                if (abs(y[i]-ycheck[i]) > 1E-5) {
                    check = false;
                    break;
                }
            }
            if (check) {
                std::stringstream kernelname;
                kernelname << "mgpu_" << valuesPerThread;
                warpkernel::addData(datafile, (char *) (kernelname.str()).c_str(), boost::accumulators::mean(statstime), -1, -1, -1, -1);
                mgpustats(boost::accumulators::mean(statstime));
                if (boost::accumulators::min(mgpustats) == boost::accumulators::mean(statstime))
                    fastestValuesPerThread = valuesPerThread;
            }
        }
    }

    // CUSPARSE
    {
        cusp::array1d<ValueType, DeviceSpace> dx = x;
        hipsparseStatus_t status2;
        hipsparseHandle_t handle = 0; // cusparse library handle
        hipsparseMatDescr_t descra;

        /* initialize cusparse library */
        status2 = hipsparseCreate(&handle);
        if (status2 != HIPSPARSE_STATUS_SUCCESS) {
            return EXIT_FAILURE;
        }
        /* create and setup matrix descriptor */
        status2 = hipsparseCreateMatDescr(&descra);
        if (status2 != HIPSPARSE_STATUS_SUCCESS) {
            printf("Matrix descriptor initialization failed\n");
            return EXIT_FAILURE;
        }
        hipsparseSetMatType(descra, HIPSPARSE_MATRIX_TYPE_GENERAL);
        hipsparseSetMatIndexBase(descra, HIPSPARSE_INDEX_BASE_ZERO);

        cusp::array1d<ValueType, DeviceSpace> dy(N);
        cusp::csr_matrix<IndexType, ValueType, DeviceSpace> dA = A;

        /* exercise Level 2 routines (csrmv) */
        boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime;
        for (int i = 0; i < ntests; i++) {
            cusp::detail::timer cusparse;
            cusparse.start();
            status2 = hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, 1.0, descra,
                                      thrust::raw_pointer_cast(&dA.values[0]),
                                      thrust::raw_pointer_cast(&dA.row_offsets[0]),
                                      thrust::raw_pointer_cast(&dA.column_indices[0]),
                                      thrust::raw_pointer_cast(&dx[0]), 0.0,
                                      thrust::raw_pointer_cast(&dy[0]));
            ValueType measuretime = cusparse.seconds_elapsed();
            statstime(measuretime);
        }
        printf("%f time elapsed for multiplication cusparse\n", boost::accumulators::mean(statstime));

        cusp::array1d<ValueType, CPUSpace> ycheck = dy;
        bool check = true;
        for (int i = 0; i < N; i++) {
            if (abs(y[i]-ycheck[i]) > 1E-5) {
                check = false;
                break;
            }
        }
        if (check) {
            std::stringstream kernelname;
            kernelname << "cusparse";
            warpkernel::addData(datafile, (char *) (kernelname.str()).c_str(), boost::accumulators::mean(statstime), -1, -1, -1, -1);
        }
    }

    std::stringstream mgpuname;
    mgpuname << "mgpuall_" << fastestValuesPerThread;
    warpkernel::addData(datafile, (char*)(mgpuname.str()).c_str(), boost::accumulators::min(mgpustats), -1, -1, -1, -1);
}
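// Each backend above repeats the same pattern: time ntests runs, average with
// a boost accumulator, then accept the result only if it matches the cusp
// reference within an absolute 1e-5 tolerance. The check, distilled
// (verifyAgainstReference is an illustrative name, not from the original):
/*
#include <cmath>

bool verifyAgainstReference(const cusp::array1d<ValueType, CPUSpace> &ref,
                            const cusp::array1d<ValueType, CPUSpace> &got)
{
    for (size_t i = 0; i < ref.size(); i++)
        if (std::abs(ref[i] - got[i]) > 1E-5) return false; // absolute tolerance, as in the file
    return true;
}
*/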
b7be925bbea332f10c2a8ff0f010bddb335f2f38.cu
// General #include <iostream> #include <algorithm> #include <sstream> #include <assert.h> // Warpkernel #include "warpkernel.hpp" // cusp #include <cusp/coo_matrix.h> #include <cusp/io/matrix_market.h> #include <cusp/csr_matrix.h> #include <cusp/multiply.h> #include <cusp/detail/timer.h> #include <cusp/hyb_matrix.h> // mgpu #include "../benchmark.h" // boost // stats #include <boost/accumulators/accumulators.hpp> #include <boost/accumulators/statistics/stats.hpp> #include <boost/accumulators/statistics/mean.hpp> #include <boost/accumulators/statistics/min.hpp> #define ValueType double #define IndexType int #define DeviceSpace cusp::device_memory #define CPUSpace cusp::host_memory struct rand_float { ValueType operator() () { return ((ValueType)(rand() % 100))/100. - 0.3; } }; int main(int argc, char *argv[]) { std::string matrixfilename = argv[1]; int ntests = 1; if (argc == 3) ntests = atoi(argv[2]); cusp::coo_matrix<IndexType, ValueType, CPUSpace> B; cusp::io::read_matrix_market_file(B, matrixfilename.c_str()); cusp::csr_matrix<IndexType, ValueType, CPUSpace> A = B; uint N = A.num_cols; uint nz = A.num_entries; // open up data file std::string filename; size_t pos = matrixfilename.find_last_of("/"); std::string matrixname; if (pos != std::string::npos ) matrixname.assign(matrixfilename.begin()+pos+1, matrixfilename.end()); else matrixname = matrixfilename; std::string datapath = "./data/" + matrixname + "_results_cusparsemgpu.txt"; std::cout << "Starting data file = " << datapath << std::endl; std::ofstream datafile(datapath.c_str()); warpkernel::startDatafile(datafile, nz,N,ntests); cusp::array1d<ValueType, CPUSpace> x(N,0); // thrust::generate(x.begin(),x.end(), rand_float()); cusp::array1d<ValueType, CPUSpace> y(N); // setup multiple run mean accumulators // find global minimum and maximum boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::min> > mgpustats; int fastestValuesPerThread=4; bool lastiter = true; // cusp multiplication { boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; cusp::csr_matrix<IndexType, ValueType, DeviceSpace> A1 = A; cusp::array1d<ValueType, DeviceSpace> dx = x; cusp::array1d<ValueType, DeviceSpace> dy = y; for (int t = 0; t < ntests; t++) { cusp::detail::timer cusptimer; cusptimer.start(); cusp::multiply(A1,dx,dy); ValueType measuredtime = cusptimer.seconds_elapsed(); statstime(measuredtime); } y = dy; if (lastiter) { std::cout << "cusp gpu time " << std::scientific << boost::accumulators::mean(statstime) << std::endl; warpkernel::addData(datafile, "cusp-csr", boost::accumulators::mean(statstime), -1, -1, -1, -1); } } // Modern GPU Benchmarks { cusp::array1d<ValueType, DeviceSpace> dx = x; sparseEngine_t mgpu; sparseStatus_t status = sparseCreate("/home/jonathan/mgpu/sparse/src/cubin/", &mgpu); if(SPARSE_STATUS_SUCCESS != status) { printf("Could not create MGPU Sparse object: %s.\n", sparseStatusString(status)); return 0; } std::auto_ptr<SparseMatrix<double> > m; std::string err; bool success = ReadSparseMatrix(matrixfilename.c_str(), SparseOrderRow, &m, err); std::vector<int> rowIndices, colIndices; std::vector<double> sparseValues; DeinterleaveMatrix(*m, rowIndices, colIndices, sparseValues); int threads[] = {4,6,8,10,12,16}; for(int vals = 0; vals < 6 ; vals++) { int valuesPerThread = threads[vals]; sparseMat_t mat = 0; status = sparseMatCreate(mgpu, m->height, m->width, SPARSE_PREC_REAL8, valuesPerThread, SPARSE_INPUT_CSR, 
(int)m->elements.size(), &(A.values[0]), &(A.row_offsets[0]), &(A.column_indices[0]), &mat); if(SPARSE_STATUS_SUCCESS != status) return status; cusp::array1d<ValueType, DeviceSpace> dy(N); boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; for (int i=0;i<ntests;i++) { cusp::detail::timer mgputime; mgputime.start(); status = sparseMatVecDMul(mgpu, 1.0, mat, (CUdeviceptr) thrust::raw_pointer_cast(&dx[0]), 0.0, (CUdeviceptr)thrust::raw_pointer_cast(&dy[0])); ValueType measuretime = mgputime.seconds_elapsed(); statstime(measuretime); } std::cout << "mgpu time" << valuesPerThread << "\t" << boost::accumulators::mean(statstime) << " s " << " status :" << status << std::endl; cusp::array1d<ValueType, CPUSpace> ycheck = dy; bool check = true; for(int i=0;i<N;i++) { if (abs(y[i]-ycheck[i]) > 1E-5) { check = false; break; } } if (check) { std::stringstream kernelname; kernelname << "mgpu_" << valuesPerThread; warpkernel::addData(datafile, (char *) (kernelname.str()).c_str(), boost::accumulators::mean(statstime) , -1,-1,-1,-1); mgpustats(boost::accumulators::mean(statstime)); if (boost::accumulators::min(mgpustats) == boost::accumulators::mean(statstime)) fastestValuesPerThread = valuesPerThread; } } } // CUSPARSE { cusp::array1d<ValueType, DeviceSpace> dx = x; cusparseStatus_t status2; cusparseHandle_t handle = 0; // cusparse library handle cusparseMatDescr_t descra; /* initialize cusparse library */ status2 = cusparseCreate(&handle); if (status2 != CUSPARSE_STATUS_SUCCESS) { return EXIT_FAILURE; } /* create and setup matrix descriptor */ status2= cusparseCreateMatDescr(&descra); if (status2 != CUSPARSE_STATUS_SUCCESS) { printf("Matrix descriptor initialization failed\n"); return EXIT_FAILURE; } cusparseSetMatType(descra,CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descra,CUSPARSE_INDEX_BASE_ZERO); cusp::array1d<ValueType, DeviceSpace> dy(N); cusp::csr_matrix<IndexType, ValueType, DeviceSpace> dA = A; /* exercise Level 2 routines (csrmv) */ boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; for(int i=0;i<ntests;i++){ cusp::detail::timer cusparse; cusparse.start(); status2 = cusparseDcsrmv(handle,CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, 1.0, descra, thrust::raw_pointer_cast(&dA.values[0]), thrust::raw_pointer_cast(&dA.row_offsets[0]), thrust::raw_pointer_cast(&dA.column_indices[0]), thrust::raw_pointer_cast(&dx[0]), 0.0, thrust::raw_pointer_cast(&dy[0])); ValueType measuretime = cusparse.seconds_elapsed(); statstime(measuretime); } printf("%f time elapsed for multiplication cusparse\n", boost::accumulators::mean(statstime) ); cusp::array1d<ValueType, CPUSpace> ycheck = dy; bool check = true; for(int i=0;i<N;i++) { if (abs(y[i]-ycheck[i]) > 1E-5) { check = false; break; } } if (check) { std::stringstream kernelname; kernelname << "cusparse"; warpkernel::addData(datafile, (char *) (kernelname.str()).c_str(), boost::accumulators::mean(statstime) , -1,-1,-1,-1); } } std::stringstream mgpuname; mgpuname << "mgpuall_"<< fastestValuesPerThread; warpkernel::addData(datafile, (char*)(mgpuname.str()).c_str(), boost::accumulators::min(mgpustats), -1, -1, -1, -1); }
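Aside from the header swap, the two files above differ only by mechanical, one-to-one API renaming: cusparse*/CUSPARSE_* become hipsparse*/HIPSPARSE_*, CUdeviceptr becomes hipDeviceptr_t, and cudaDeviceSynchronize becomes hipDeviceSynchronize. The runtime API follows the same rule; a minimal sketch with a hypothetical snippet (not from these files):

#include <cuda_runtime.h>

// Illustrative only: each call has a drop-in hip* counterpart, noted in comments.
int demo(const float *h, int n) {
    float *d = nullptr;
    cudaMalloc(&d, n * sizeof(float));                            // hipify: hipMalloc
    cudaMemcpy(d, h, n * sizeof(float), cudaMemcpyHostToDevice);  // hipify: hipMemcpy / hipMemcpyHostToDevice
    cudaDeviceSynchronize();                                      // hipify: hipDeviceSynchronize
    cudaFree(d);                                                  // hipify: hipFree
    return 0;
}
// Library namespaces follow the same rule, e.g. cusparseCreate -> hipsparseCreate,
// CUSPARSE_STATUS_SUCCESS -> HIPSPARSE_STATUS_SUCCESS, CUdeviceptr -> hipDeviceptr_t.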
4451802c8b2322ffd859073f0941b02b211084af.hip
// !!! This is a file automatically generated by hipify!!! #include <nori/bumpMap.h> #include <filesystem/resolver.h> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include "opencv2/imgproc/imgproc.hpp" NORI_NAMESPACE_BEGIN __device__ Vector3f BumpMap::eval(const Point2f& uv, const Frame& frame) { // rescale and shift float x = (uv.x()/m_scale.x()) - m_delta.x(); float y = (uv.y()/m_scale.y()) - m_delta.y(); x = x - floor(x); y = y - floor(y); float Bu = tex2D<float>(image_tex_x, x, y); float Bv = tex2D<float>(image_tex_y, x, y); //printf("%.3f %.3f",Bu,Bv); Vector3f n = frame.n + Bu*frame.n.cross(frame.s) - Bv*frame.n.cross(frame.t); n.normalize(); return n; } #ifndef __CUDA_ARCH__ __host__ BumpMap::BumpMap(const PropertyList &props) { modifierType = EBumpMap; m_delta = props.getPoint2("delta", Point2f(0)); m_scale = props.getVector2("scale", Vector2f(1)); height = props.getFloat("height", 5); grad_delta = props.getFloat("grad_delta", 0); filesystem::path filename = getFileResolver()->resolve(props.getString("filename")); image = cv::imread(filename.str(), CV_LOAD_IMAGE_GRAYSCALE); if (!image.data) { throw NoriException("Image %s could not be found!", filename); \ } cout << getSize() << endl; } __host__ std::string BumpMap::toString() const { return tfm::format( "BumpMap[]"); } __host__ void BumpMap::transferImage(unsigned char** target, hipTextureObject_t* image_tex, cv::Mat& img) { CHECK_ERROR( hipMallocPitch(target, &gpu_step, img.elemSize() * img.cols, img.rows)); CHECK_ERROR( hipMemcpy2D(*target, gpu_step, img.data, img.step, img.cols * img.elemSize(), img.rows, hipMemcpyHostToDevice)); hipChannelFormatDesc desc = hipCreateChannelDesc(16,0,0,0, hipChannelFormatKindSigned); // Specify texture struct hipResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = hipResourceTypePitch2D; resDesc.res.pitch2D.devPtr = *target; resDesc.res.pitch2D.pitchInBytes = gpu_step; resDesc.res.pitch2D.width = img.cols; resDesc.res.pitch2D.height = img.rows; resDesc.res.pitch2D.desc = desc; // Specify texture object parameters struct hipTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = hipAddressModeWrap; texDesc.addressMode[1] = hipAddressModeWrap; texDesc.filterMode = hipFilterModePoint; texDesc.filterMode = hipFilterModeLinear; texDesc.readMode = hipReadModeElementType; texDesc.readMode = hipReadModeNormalizedFloat; texDesc.normalizedCoords = 1; CHECK_ERROR(hipCreateTextureObject(image_tex, &resDesc, &texDesc, NULL)); } __host__ void BumpMap::gpuTransfer(NoriObject ** objects) { cv::Mat grad_x, grad_y; // derivative parameters int ddepth = CV_16S; /// Gradient X //cv::Scharr( img, grad_x, ddepth, 1, 0, height, grad_delta, cv::BORDER_DEFAULT ); cv::Sobel(image, grad_x, ddepth, 1, 0, 3, height, grad_delta, cv::BORDER_DEFAULT ); /// Gradient Y //cv::Scharr( image, grad_y, ddepth, 0, 1, height, grad_delta, cv::BORDER_DEFAULT ); cv::Sobel(image, grad_y, ddepth, 0, 1, 3, height, grad_delta, cv::BORDER_DEFAULT ); /*cv::namedWindow("test"); cv::imshow("image", grad_x); cv::waitKey(); cv::imshow("image", grad_y); cv::waitKey();*/ transferImage(&gpu_data_x, &image_tex_x, grad_x); transferImage(&gpu_data_y, &image_tex_y, grad_y); } #endif #ifndef __CUDA_ARCH__ NORI_REGISTER_CLASS(BumpMap, "bump_map") #endif NORI_NAMESPACE_END
4451802c8b2322ffd859073f0941b02b211084af.cu
#include <nori/bumpMap.h> #include <filesystem/resolver.h> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include "opencv2/imgproc/imgproc.hpp" NORI_NAMESPACE_BEGIN __device__ Vector3f BumpMap::eval(const Point2f& uv, const Frame& frame) { // rescale and shift float x = (uv.x()/m_scale.x()) - m_delta.x(); float y = (uv.y()/m_scale.y()) - m_delta.y(); x = x - floor(x); y = y - floor(y); float Bu = tex2D<float>(image_tex_x, x, y); float Bv = tex2D<float>(image_tex_y, x, y); //printf("%.3f %.3f",Bu,Bv); Vector3f n = frame.n + Bu*frame.n.cross(frame.s) - Bv*frame.n.cross(frame.t); n.normalize(); return n; } #ifndef __CUDA_ARCH__ __host__ BumpMap::BumpMap(const PropertyList &props) { modifierType = EBumpMap; m_delta = props.getPoint2("delta", Point2f(0)); m_scale = props.getVector2("scale", Vector2f(1)); height = props.getFloat("height", 5); grad_delta = props.getFloat("grad_delta", 0); filesystem::path filename = getFileResolver()->resolve(props.getString("filename")); image = cv::imread(filename.str(), CV_LOAD_IMAGE_GRAYSCALE); if (!image.data) { throw NoriException("Image %s could not be found!", filename); \ } cout << getSize() << endl; } __host__ std::string BumpMap::toString() const { return tfm::format( "BumpMap[]"); } __host__ void BumpMap::transferImage(unsigned char** target, cudaTextureObject_t* image_tex, cv::Mat& img) { CHECK_ERROR( cudaMallocPitch(target, &gpu_step, img.elemSize() * img.cols, img.rows)); CHECK_ERROR( cudaMemcpy2D(*target, gpu_step, img.data, img.step, img.cols * img.elemSize(), img.rows, cudaMemcpyHostToDevice)); cudaChannelFormatDesc desc = cudaCreateChannelDesc(16,0,0,0, cudaChannelFormatKindSigned); // Specify texture struct cudaResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = cudaResourceTypePitch2D; resDesc.res.pitch2D.devPtr = *target; resDesc.res.pitch2D.pitchInBytes = gpu_step; resDesc.res.pitch2D.width = img.cols; resDesc.res.pitch2D.height = img.rows; resDesc.res.pitch2D.desc = desc; // Specify texture object parameters struct cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = cudaAddressModeWrap; texDesc.addressMode[1] = cudaAddressModeWrap; texDesc.filterMode = cudaFilterModePoint; texDesc.filterMode = cudaFilterModeLinear; texDesc.readMode = cudaReadModeElementType; texDesc.readMode = cudaReadModeNormalizedFloat; texDesc.normalizedCoords = 1; CHECK_ERROR(cudaCreateTextureObject(image_tex, &resDesc, &texDesc, NULL)); } __host__ void BumpMap::gpuTransfer(NoriObject ** objects) { cv::Mat grad_x, grad_y; // derivative parameters int ddepth = CV_16S; /// Gradient X //cv::Scharr( img, grad_x, ddepth, 1, 0, height, grad_delta, cv::BORDER_DEFAULT ); cv::Sobel(image, grad_x, ddepth, 1, 0, 3, height, grad_delta, cv::BORDER_DEFAULT ); /// Gradient Y //cv::Scharr( image, grad_y, ddepth, 0, 1, height, grad_delta, cv::BORDER_DEFAULT ); cv::Sobel(image, grad_y, ddepth, 0, 1, 3, height, grad_delta, cv::BORDER_DEFAULT ); /*cv::namedWindow("test"); cv::imshow("image", grad_x); cv::waitKey(); cv::imshow("image", grad_y); cv::waitKey();*/ transferImage(&gpu_data_x, &image_tex_x, grad_x); transferImage(&gpu_data_y, &image_tex_y, grad_y); } #endif #ifndef __CUDA_ARCH__ NORI_REGISTER_CLASS(BumpMap, "bump_map") #endif NORI_NAMESPACE_END
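In transferImage above (identical in both versions), texDesc.filterMode and texDesc.readMode are each assigned twice; only the second assignment survives, so sampling actually uses linear filtering and normalized-float reads, which map the signed 16-bit Sobel gradients into [-1, 1]. No matching cudaDestroyTextureObject or free of the pitched allocation appears in this excerpt. A minimal sketch of the full texture-object lifecycle, assuming a hypothetical single-channel float image (names are illustrative, not from BumpMap):

#include <cuda_runtime.h>

// Create a texture object over an existing pitched float image (illustrative).
cudaTextureObject_t makeTexture(void *devPtr, size_t pitchBytes, int width, int height) {
    cudaResourceDesc resDesc = {};
    resDesc.resType = cudaResourceTypePitch2D;
    resDesc.res.pitch2D.devPtr = devPtr;
    resDesc.res.pitch2D.pitchInBytes = pitchBytes;
    resDesc.res.pitch2D.width = width;
    resDesc.res.pitch2D.height = height;
    resDesc.res.pitch2D.desc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);

    cudaTextureDesc texDesc = {};
    texDesc.addressMode[0] = cudaAddressModeWrap;  // matches the tiled UVs in BumpMap::eval
    texDesc.addressMode[1] = cudaAddressModeWrap;
    texDesc.filterMode = cudaFilterModeLinear;     // set once; the duplicate writes above are redundant
    texDesc.readMode = cudaReadModeElementType;    // float channels cannot use NormalizedFloat
    texDesc.normalizedCoords = 1;

    cudaTextureObject_t tex = 0;
    cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
    return tex;
}
// ...sample with tex2D<float>(tex, u, v) in kernels...
// Teardown (absent from the excerpt): cudaDestroyTextureObject(tex); cudaFree(devPtr);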
e16444d64f4c758328a39ac08f4513b0bed262e9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Grean and Blue is in it. //The 'A' stands for Alpha and is used for transparency, it will be //ignored in this homework. //Each channel Red, Blue, Green and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. #include "utils.h" #include "stdio.h" __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D offset int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; if (r >= numRows || c >= numCols) return; int pixelId = r*numCols + c; float channelSum = 0.299f * rgbaImage[pixelId].x + 0.587f * rgbaImage[pixelId].y + 0.114f * rgbaImage[pixelId].z; greyImage[pixelId] = (channelSum); if (r ==48 && c==110) { printf("GPU value = %f\n", channelSum); printf("GPU value = %d\n", greyImage[pixelId]); } } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched int block = 16; const dim3 blockSize(block, block, 1); //TODO int bx = numCols/block+1, by = numRows/block+1; printf("(bx,by) = (%d,%d)\n",bx,by); const dim3 gridSize(bx ,by , 1); //TODO hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); }
e16444d64f4c758328a39ac08f4513b0bed262e9.cu
// Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Grean and Blue is in it. //The 'A' stands for Alpha and is used for transparency, it will be //ignored in this homework. //Each channel Red, Blue, Green and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. #include "utils.h" #include "stdio.h" __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D offset int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; if (r >= numRows || c >= numCols) return; int pixelId = r*numCols + c; float channelSum = 0.299f * rgbaImage[pixelId].x + 0.587f * rgbaImage[pixelId].y + 0.114f * rgbaImage[pixelId].z; greyImage[pixelId] = (channelSum); if (r ==48 && c==110) { printf("GPU value = %f\n", channelSum); printf("GPU value = %d\n", greyImage[pixelId]); } } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched int block = 16; const dim3 blockSize(block, block, 1); //TODO int bx = numCols/block+1, by = numRows/block+1; printf("(bx,by) = (%d,%d)\n",bx,by); const dim3 gridSize(bx ,by , 1); //TODO rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); }
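Both versions size the grid as numCols/block+1 by numRows/block+1, which launches an extra, fully idle row or column of blocks whenever the image dimension is an exact multiple of 16; the in-kernel bounds check keeps that correct but wasteful. The usual ceiling division sizes the grid tightly. A sketch:

// Smallest grid that still covers every pixel (assumes block > 0).
inline unsigned int ceilDiv(unsigned int n, unsigned int block) {
    return (n + block - 1) / block;
}
// const dim3 gridSize(ceilDiv(numCols, block), ceilDiv(numRows, block), 1);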
913f497e8ad8fb92f78447f4049b32f7c1082e4e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <cmath> #include <vector> #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" #include "caffe/sds_layers.hpp" #include "caffe/util/math_functions.hpp" using std::max; using std::min; namespace caffe { template <typename Dtype> inline Dtype sigmoid(Dtype x) { return 1. / (1. + exp(-x)); } template <typename Dtype> __global__ void LocallyConnectedForward(const int nthreads, const Dtype* bottom_data, const Dtype* label_data, int width, int height, int channels, int num, int grid_size, Dtype* temp_data, Dtype* temp2_data, Dtype* temp3_data, Dtype pos_loss_wt, Dtype neg_loss_wt, const Dtype* instance_wt) { CUDA_KERNEL_LOOP(index, nthreads) { int channels2 = grid_size*grid_size; int outchannels = channels/channels2; float w = static_cast<float>(width); float h = static_cast<float>(height); float g = static_cast<float>(grid_size); int x = index % width; int y = (index / width) % height; int c = (index / width / height) % outchannels; int n = index / width / height / outchannels; const Dtype* bottom_curr = bottom_data + n*channels*width*height +c*channels2*width*height; Dtype* temp_curr = temp_data + n*channels*width*height +c*channels2*width*height; const Dtype* label_curr = label_data + n*outchannels*width*height+c*width*height; float Y = (static_cast<float>(y)+0.5)*g/h - 0.5; float Yl = floor(Y); float Yh = Yl+1.0; float wYl = Yh-Y; float wYh = Y-Yl; Yl = max(Yl, 0.0f); Yh = min(Yh, g-1.0f); float X = (static_cast<float>(x)+0.5)*g/w - 0.5; float Xl = floor(X); float Xh = Xl+1.0; float wXl = Xh-X; float wXh = X-Xl; Xl = max(Xl, 0.0f); Xh = min(Xh, g-1.0f); //compute the index from which to pull int indexll = static_cast<int>(Yl*g+Xl); int indexlh = static_cast<int>(Yl*g+Xh); int indexhl = static_cast<int>(Yh*g+Xl); int indexhh = static_cast<int>(Yh*g+Xh); //let's pick all the channels Dtype val=0.0; Dtype f; f= static_cast<Dtype>(wXl*wYl)/(1. + exp(-bottom_curr[indexll*width*height+y*width+x])); val+=f; temp_curr[indexll*width*height+y*width+x] += static_cast<Dtype>(wXl*wYl); f= static_cast<Dtype>(wXh*wYl)/(1. + exp(-bottom_curr[indexlh*width*height+y*width+x])); val+=f; temp_curr[indexlh*width*height+y*width+x] +=static_cast<Dtype>(wXh*wYl); f= static_cast<Dtype>(wXl*wYh)/(1. + exp(-bottom_curr[indexhl*width*height+y*width+x])); val+=f; temp_curr[indexhl*width*height+y*width+x] +=static_cast<Dtype>(wXl*wYh); f= static_cast<Dtype>(wXh*wYh)/(1. 
+ exp(-bottom_curr[indexhh*width*height+y*width+x])); val+=f; temp_curr[indexhh*width*height+y*width+x] +=static_cast<Dtype>(wXh*wYh); //Save the values temp2_data[index]=val; Dtype label = label_data[index]; Dtype loss = 0.0; loss = loss - static_cast<Dtype>(label>0.5)*pos_loss_wt*log(max(val, (Dtype)FLT_MIN)); loss = loss - static_cast<Dtype>(label<0.5 && label>=0.0)*neg_loss_wt*log(max(1-val, (Dtype)FLT_MIN)); /*if(label>0.0) { loss -= label>0.5?pos_loss_wt*log(max(val, (Dtype)FLT_MIN)):neg_loss_wt*log(max(1-val, (Dtype)FLT_MIN)); }*/ temp3_data[index] = loss*instance_wt[n]; } } template <typename Dtype> void LocallyConnectedWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int num=bottom[0]->num(); const int channels=bottom[0]->channels(); const int height=bottom[0]->height(); const int width=bottom[0]->width(); const int grid_size = this->layer_param_.local_layer_param().grid_size(); const Dtype* bottom_data=bottom[0]->gpu_data(); const Dtype* bottom_labels=bottom[1]->gpu_data(); Dtype* temp_data = temp->mutable_gpu_data(); Dtype* temp2_data = temp2->mutable_gpu_data(); Dtype* temp3_data = temp3->mutable_gpu_data(); Dtype* temp4_data = temp4->mutable_gpu_data(); caffe_gpu_set(temp->count(), (Dtype)0.0, temp_data); caffe_gpu_set(temp2->count(), (Dtype)0.0, temp2_data); caffe_gpu_set(temp3->count(), (Dtype)1.0, temp3_data); caffe_gpu_set(temp4->count(), (Dtype)1.0, temp4_data); const Dtype neg_loss_wt = this->layer_param_.local_layer_param().neg_loss_wt(); const Dtype pos_loss_wt = this->layer_param_.local_layer_param().pos_loss_wt(); const int count=bottom[1]->count(); const Dtype* instance_wt = bottom[2]->gpu_data(); hipLaunchKernelGGL(( LocallyConnectedForward<Dtype>) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom_labels, width, height, channels, num, grid_size, temp_data, temp2_data, temp3_data, pos_loss_wt, neg_loss_wt, instance_wt); CUDA_POST_KERNEL_CHECK; hipDeviceSynchronize(); Dtype *loss = top[0]->mutable_cpu_data(); Dtype dot=0; caffe_gpu_dot(count, temp4_data, temp3_data, &dot); loss[0] = dot/bottom[1]->num(); } template <typename Dtype> __global__ void LocallyConnectedBackward(const int nthreads, Dtype* bottom_diff, const Dtype* bottom_data, int width, int height, int channels, int num, const Dtype* label_data, const Dtype* temp_data, const Dtype* temp2_data, int grid_size, int count, const Dtype* instance_wt) { CUDA_KERNEL_LOOP(index, nthreads){ int channels2 = grid_size*grid_size; int outchannels = channels/channels2; float w = static_cast<float>(width); float h = static_cast<float>(height); float g = static_cast<float>(grid_size); int x = index % width; int y = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; int c_out = c/channels2; Dtype label = label_data[((n*outchannels+c_out)*height+y)*width+x]; Dtype alpha = temp_data[index]; Dtype sumalphap = temp2_data[((n*outchannels+c_out)*height+y)*width+x]; Dtype p = 1.0/(1.0+ exp(-bottom_data[index])); if(label>0.5) { Dtype frac = alpha*p/(max(sumalphap, (Dtype)FLT_MIN)); frac = max(min(frac, Dtype(1.0)),Dtype(0.0)); bottom_diff[index] = -frac*((Dtype)1.0 - p)*instance_wt[n]/count; } else if(label>=0) { Dtype frac = alpha*((Dtype)1.0-p)/(max((Dtype)1.0-sumalphap, (Dtype)FLT_MIN)); frac = max(min(frac, Dtype(1.0)),Dtype(0.0)); bottom_diff[index] = frac*p*instance_wt[n]/count; } } } template <typename Dtype> void 
LocallyConnectedWithLossLayer<Dtype>::Backward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if(!propagate_down[0]) { return; } const int num=bottom[0]->num(); const int channels=bottom[0]->channels(); const int height=bottom[0]->height(); const int width=bottom[0]->width(); const int grid_size = this->layer_param_.local_layer_param().grid_size(); Dtype* bottom_diff=bottom[0]->mutable_gpu_diff(); caffe_gpu_set(bottom[0]->count(), Dtype(0.0), bottom_diff); const int count = bottom[0]->count(); const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* label_data = bottom[1]->gpu_data(); const Dtype* temp_data = temp->gpu_data(); const Dtype* temp2_data = temp2->gpu_data(); const Dtype* instance_wt = bottom[2]->gpu_data(); hipLaunchKernelGGL(( LocallyConnectedBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_diff, bottom_data, width, height, channels,num, label_data, temp_data, temp2_data, grid_size, bottom[1]->num(), instance_wt); CUDA_POST_KERNEL_CHECK; const Dtype loss_weight = top[0]->cpu_diff()[0]; caffe_gpu_scal(count, loss_weight, bottom_diff); } INSTANTIATE_LAYER_GPU_FUNCS(LocallyConnectedWithLossLayer); } // namespace caffe
913f497e8ad8fb92f78447f4049b32f7c1082e4e.cu
#include <algorithm> #include <cfloat> #include <cmath> #include <vector> #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" #include "caffe/sds_layers.hpp" #include "caffe/util/math_functions.hpp" using std::max; using std::min; namespace caffe { template <typename Dtype> inline Dtype sigmoid(Dtype x) { return 1. / (1. + exp(-x)); } template <typename Dtype> __global__ void LocallyConnectedForward(const int nthreads, const Dtype* bottom_data, const Dtype* label_data, int width, int height, int channels, int num, int grid_size, Dtype* temp_data, Dtype* temp2_data, Dtype* temp3_data, Dtype pos_loss_wt, Dtype neg_loss_wt, const Dtype* instance_wt) { CUDA_KERNEL_LOOP(index, nthreads) { int channels2 = grid_size*grid_size; int outchannels = channels/channels2; float w = static_cast<float>(width); float h = static_cast<float>(height); float g = static_cast<float>(grid_size); int x = index % width; int y = (index / width) % height; int c = (index / width / height) % outchannels; int n = index / width / height / outchannels; const Dtype* bottom_curr = bottom_data + n*channels*width*height +c*channels2*width*height; Dtype* temp_curr = temp_data + n*channels*width*height +c*channels2*width*height; const Dtype* label_curr = label_data + n*outchannels*width*height+c*width*height; float Y = (static_cast<float>(y)+0.5)*g/h - 0.5; float Yl = floor(Y); float Yh = Yl+1.0; float wYl = Yh-Y; float wYh = Y-Yl; Yl = max(Yl, 0.0f); Yh = min(Yh, g-1.0f); float X = (static_cast<float>(x)+0.5)*g/w - 0.5; float Xl = floor(X); float Xh = Xl+1.0; float wXl = Xh-X; float wXh = X-Xl; Xl = max(Xl, 0.0f); Xh = min(Xh, g-1.0f); //compute the index from which to pull int indexll = static_cast<int>(Yl*g+Xl); int indexlh = static_cast<int>(Yl*g+Xh); int indexhl = static_cast<int>(Yh*g+Xl); int indexhh = static_cast<int>(Yh*g+Xh); //let's pick all the channels Dtype val=0.0; Dtype f; f= static_cast<Dtype>(wXl*wYl)/(1. + exp(-bottom_curr[indexll*width*height+y*width+x])); val+=f; temp_curr[indexll*width*height+y*width+x] += static_cast<Dtype>(wXl*wYl); f= static_cast<Dtype>(wXh*wYl)/(1. + exp(-bottom_curr[indexlh*width*height+y*width+x])); val+=f; temp_curr[indexlh*width*height+y*width+x] +=static_cast<Dtype>(wXh*wYl); f= static_cast<Dtype>(wXl*wYh)/(1. + exp(-bottom_curr[indexhl*width*height+y*width+x])); val+=f; temp_curr[indexhl*width*height+y*width+x] +=static_cast<Dtype>(wXl*wYh); f= static_cast<Dtype>(wXh*wYh)/(1. 
+ exp(-bottom_curr[indexhh*width*height+y*width+x])); val+=f; temp_curr[indexhh*width*height+y*width+x] +=static_cast<Dtype>(wXh*wYh); //Save the values temp2_data[index]=val; Dtype label = label_data[index]; Dtype loss = 0.0; loss = loss - static_cast<Dtype>(label>0.5)*pos_loss_wt*log(max(val, (Dtype)FLT_MIN)); loss = loss - static_cast<Dtype>(label<0.5 && label>=0.0)*neg_loss_wt*log(max(1-val, (Dtype)FLT_MIN)); /*if(label>0.0) { loss -= label>0.5?pos_loss_wt*log(max(val, (Dtype)FLT_MIN)):neg_loss_wt*log(max(1-val, (Dtype)FLT_MIN)); }*/ temp3_data[index] = loss*instance_wt[n]; } } template <typename Dtype> void LocallyConnectedWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int num=bottom[0]->num(); const int channels=bottom[0]->channels(); const int height=bottom[0]->height(); const int width=bottom[0]->width(); const int grid_size = this->layer_param_.local_layer_param().grid_size(); const Dtype* bottom_data=bottom[0]->gpu_data(); const Dtype* bottom_labels=bottom[1]->gpu_data(); Dtype* temp_data = temp->mutable_gpu_data(); Dtype* temp2_data = temp2->mutable_gpu_data(); Dtype* temp3_data = temp3->mutable_gpu_data(); Dtype* temp4_data = temp4->mutable_gpu_data(); caffe_gpu_set(temp->count(), (Dtype)0.0, temp_data); caffe_gpu_set(temp2->count(), (Dtype)0.0, temp2_data); caffe_gpu_set(temp3->count(), (Dtype)1.0, temp3_data); caffe_gpu_set(temp4->count(), (Dtype)1.0, temp4_data); const Dtype neg_loss_wt = this->layer_param_.local_layer_param().neg_loss_wt(); const Dtype pos_loss_wt = this->layer_param_.local_layer_param().pos_loss_wt(); const int count=bottom[1]->count(); const Dtype* instance_wt = bottom[2]->gpu_data(); LocallyConnectedForward<Dtype> <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom_labels, width, height, channels, num, grid_size, temp_data, temp2_data, temp3_data, pos_loss_wt, neg_loss_wt, instance_wt); CUDA_POST_KERNEL_CHECK; cudaDeviceSynchronize(); Dtype *loss = top[0]->mutable_cpu_data(); Dtype dot=0; caffe_gpu_dot(count, temp4_data, temp3_data, &dot); loss[0] = dot/bottom[1]->num(); } template <typename Dtype> __global__ void LocallyConnectedBackward(const int nthreads, Dtype* bottom_diff, const Dtype* bottom_data, int width, int height, int channels, int num, const Dtype* label_data, const Dtype* temp_data, const Dtype* temp2_data, int grid_size, int count, const Dtype* instance_wt) { CUDA_KERNEL_LOOP(index, nthreads){ int channels2 = grid_size*grid_size; int outchannels = channels/channels2; float w = static_cast<float>(width); float h = static_cast<float>(height); float g = static_cast<float>(grid_size); int x = index % width; int y = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; int c_out = c/channels2; Dtype label = label_data[((n*outchannels+c_out)*height+y)*width+x]; Dtype alpha = temp_data[index]; Dtype sumalphap = temp2_data[((n*outchannels+c_out)*height+y)*width+x]; Dtype p = 1.0/(1.0+ exp(-bottom_data[index])); if(label>0.5) { Dtype frac = alpha*p/(max(sumalphap, (Dtype)FLT_MIN)); frac = max(min(frac, Dtype(1.0)),Dtype(0.0)); bottom_diff[index] = -frac*((Dtype)1.0 - p)*instance_wt[n]/count; } else if(label>=0) { Dtype frac = alpha*((Dtype)1.0-p)/(max((Dtype)1.0-sumalphap, (Dtype)FLT_MIN)); frac = max(min(frac, Dtype(1.0)),Dtype(0.0)); bottom_diff[index] = frac*p*instance_wt[n]/count; } } } template <typename Dtype> void LocallyConnectedWithLossLayer<Dtype>::Backward_gpu( const 
vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if(!propagate_down[0]) { return; } const int num=bottom[0]->num(); const int channels=bottom[0]->channels(); const int height=bottom[0]->height(); const int width=bottom[0]->width(); const int grid_size = this->layer_param_.local_layer_param().grid_size(); Dtype* bottom_diff=bottom[0]->mutable_gpu_diff(); caffe_gpu_set(bottom[0]->count(), Dtype(0.0), bottom_diff); const int count = bottom[0]->count(); const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* label_data = bottom[1]->gpu_data(); const Dtype* temp_data = temp->gpu_data(); const Dtype* temp2_data = temp2->gpu_data(); const Dtype* instance_wt = bottom[2]->gpu_data(); LocallyConnectedBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, bottom_diff, bottom_data, width, height, channels,num, label_data, temp_data, temp2_data, grid_size, bottom[1]->num(), instance_wt); CUDA_POST_KERNEL_CHECK; const Dtype loss_weight = top[0]->cpu_diff()[0]; caffe_gpu_scal(count, loss_weight, bottom_diff); } INSTANTIATE_LAYER_GPU_FUNCS(LocallyConnectedWithLossLayer); } // namespace caffe
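Every kernel in the pair above iterates with Caffe's CUDA_KERNEL_LOOP and is launched with CAFFE_GET_BLOCKS blocks of CAFFE_CUDA_NUM_THREADS threads. For reference, these expand to a standard grid-stride loop; the definitions below are paraphrased from Caffe's device_alternate.hpp, and the thread count is Caffe's usual default, which may differ between versions:

// Grid-stride loop: each thread handles indices i, i + stride, i + 2*stride, ...
#define CUDA_KERNEL_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
       i < (n); \
       i += blockDim.x * gridDim.x)

const int CAFFE_CUDA_NUM_THREADS = 512;  // Caffe's default; check your version

// Number of blocks needed to give every element at least one thread.
inline int CAFFE_GET_BLOCKS(const int N) {
  return (N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS;
}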
a39c0adc05013487938aa43775bf172cb74ca63f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pairwise_transform.h"

#define MIN 1e-12

__device__ double op(double d1, double d2, double *params) {
    double diff = d1 - d2;
    double absDiff = abs(diff);
    if (absDiff < MIN)
        return 1;
    return 0;
}

__device__ double op(double d1, double *params) {
    return d1;
}

extern "C"
__global__ void eps_strided_double(int n, int xOffset, int yOffset, double *dx, double *dy,
                                   int incx, int incy, double *params, double *result, int incz) {
    transform(n, xOffset, yOffset, dx, dy, incx, incy, params, result, incz);
}
a39c0adc05013487938aa43775bf172cb74ca63f.cu
#include "pairwise_transform.h" #define MIN 1e-12 __device__ double op(double d1,double d2,double *params) { double diff = d1 - d2; double absDiff = abs(diff); if(absDiff < MIN) return 1; return 0; } __device__ double op(double d1,double *params) { return d1; } extern "C" __global__ void eps_strided_double(int n, int xOffset,int yOffset,double *dx, double *dy,int incx,int incy,double *params,double *result,int incz) { transform(n,xOffset,yOffset,dx,dy,incx,incy,params,result,incz); }
093514d50a57960b850eff6c36b92c140286eba9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" { #define FLT_MIN 1.175494351e-38F #define FLT_MAX 3.402823466e+38F __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, float* const redChannel, float* const greenChannel, float* const blueChannel) { int absolute_image_position_x = blockDim.x * blockIdx.x + threadIdx.x; int absolute_image_position_y = blockDim.y * blockIdx.y + threadIdx.y; if ( absolute_image_position_x >= numCols || absolute_image_position_y >= numRows ) return ; int thread_1D_pos = absolute_image_position_y * numCols + absolute_image_position_x; redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x; greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y; blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z; } __global__ void rgb_to_xyY( float* d_r, float* d_g, float* d_b, float* d_x, float* d_y, float* d_log_Y, float delta, int num_pixels_y, int num_pixels_x ) { int ny = num_pixels_y; int nx = num_pixels_x; int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y ); int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x; if ( image_index_2d.x < nx && image_index_2d.y < ny ) { float r = d_r[ image_index_1d ]; float g = d_g[ image_index_1d ]; float b = d_b[ image_index_1d ]; float X = ( r * 0.4124f ) + ( g * 0.3576f ) + ( b * 0.1805f ); float Y = ( r * 0.2126f ) + ( g * 0.7152f ) + ( b * 0.0722f ); float Z = ( r * 0.0193f ) + ( g * 0.1192f ) + ( b * 0.9505f ); float L = X + Y + Z; float x = X / L; float y = Y / L; float log_Y = log10f( delta + Y ); d_x[ image_index_1d ] = x; d_y[ image_index_1d ] = y; d_log_Y[ image_index_1d ] = log_Y; } } __global__ void kernel_scan(int* d_bins, int size) { int index = blockDim.x*blockIdx.x+threadIdx.x; if(index >= size) return; int temp; if(index > 0) { temp = d_bins[index - 1]; } else { temp = 0; } __syncthreads(); d_bins[index] = temp; __syncthreads(); int val = 0; for(int s=1; s<=size; s*=2) { int a = index-s; val = 0; if(a>=0) val = d_bins[a]; __syncthreads(); if(a>=0) d_bins[index] += val; __syncthreads(); } } __global__ void kernel_histo(const float* d_in, int* d_bins, float min,float max,int size, int numBins) { int index = blockDim.x*blockIdx.x+threadIdx.x; if(index<size) { int a = ((d_in[index] - min)/(max-min))* numBins; atomicAdd(&d_bins[a], 1); } } __global__ void kernel_maxmin(float* d_in, float*d_out, int size, int maxmin) { int tid = threadIdx.x; int x = blockDim.x * blockIdx.x + threadIdx.x; extern __shared__ float shared[]; if(x>=size) return ; if(x<size) shared[tid] = d_in[x]; else { if(maxmin == 0) shared[tid] = FLT_MAX; else shared[tid] = -FLT_MAX; } __syncthreads(); for(int s=1; s<blockDim.x; s++) { if(tid % (2*s) == 0) { if(s+tid < blockDim.x) if(maxmin == 0) shared[tid] = min(shared[tid], shared[tid+s]); else shared[tid] = max(shared[tid], shared[tid+s]); } __syncthreads(); } __syncthreads(); if(tid == 0) d_out[blockIdx.x] = shared[0]; } __global__ void tonemap( float* d_x, float* d_y, float* d_log_Y, float* d_cdf_norm, float* d_r_new, float* d_g_new, float* d_b_new, float min_log_Y, float max_log_Y, float log_Y_range, int num_bins, int num_pixels_y, int num_pixels_x ) { int ny = num_pixels_y; int nx = num_pixels_x; int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y ); int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x; if ( image_index_2d.x < nx && 
image_index_2d.y < ny ) { float x = d_x[ image_index_1d ]; float y = d_y[ image_index_1d ]; float log_Y = d_log_Y[ image_index_1d ]; int bin_index = min( num_bins - 1, int( (num_bins * ( log_Y - min_log_Y ) ) / log_Y_range ) ); float Y_new = d_cdf_norm[ bin_index ]; float X_new = x * ( Y_new / y ); float Z_new = ( 1 - x - y ) * ( Y_new / y ); float r_new = ( X_new * 3.2406f ) + ( Y_new * -1.5372f ) + ( Z_new * -0.4986f ); float g_new = ( X_new * -0.9689f ) + ( Y_new * 1.8758f ) + ( Z_new * 0.0415f ); float b_new = ( X_new * 0.0557f ) + ( Y_new * -0.2040f ) + ( Z_new * 1.0570f ); d_r_new[ image_index_1d ] = r_new; d_g_new[ image_index_1d ] = g_new; d_b_new[ image_index_1d ] = b_new; } } __global__ void recombineChannels(const float* const redChannel, const float* const greenChannel, const float* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } __global__ void normalize_cdf( unsigned int* d_input_cdf, float* d_output_cdf, int n ) { const float normalization_constant = 1.f / d_input_cdf[n - 1]; int global_index_1d = ( blockIdx.x * blockDim.x ) + threadIdx.x; if ( global_index_1d < n ) { unsigned int input_value = d_input_cdf[ global_index_1d ]; float output_value = input_value * normalization_constant; d_output_cdf[ global_index_1d ] = output_value; } } }
093514d50a57960b850eff6c36b92c140286eba9.cu
extern "C" { #define FLT_MIN 1.175494351e-38F #define FLT_MAX 3.402823466e+38F __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, float* const redChannel, float* const greenChannel, float* const blueChannel) { int absolute_image_position_x = blockDim.x * blockIdx.x + threadIdx.x; int absolute_image_position_y = blockDim.y * blockIdx.y + threadIdx.y; if ( absolute_image_position_x >= numCols || absolute_image_position_y >= numRows ) return ; int thread_1D_pos = absolute_image_position_y * numCols + absolute_image_position_x; redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x; greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y; blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z; } __global__ void rgb_to_xyY( float* d_r, float* d_g, float* d_b, float* d_x, float* d_y, float* d_log_Y, float delta, int num_pixels_y, int num_pixels_x ) { int ny = num_pixels_y; int nx = num_pixels_x; int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y ); int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x; if ( image_index_2d.x < nx && image_index_2d.y < ny ) { float r = d_r[ image_index_1d ]; float g = d_g[ image_index_1d ]; float b = d_b[ image_index_1d ]; float X = ( r * 0.4124f ) + ( g * 0.3576f ) + ( b * 0.1805f ); float Y = ( r * 0.2126f ) + ( g * 0.7152f ) + ( b * 0.0722f ); float Z = ( r * 0.0193f ) + ( g * 0.1192f ) + ( b * 0.9505f ); float L = X + Y + Z; float x = X / L; float y = Y / L; float log_Y = log10f( delta + Y ); d_x[ image_index_1d ] = x; d_y[ image_index_1d ] = y; d_log_Y[ image_index_1d ] = log_Y; } } __global__ void kernel_scan(int* d_bins, int size) { int index = blockDim.x*blockIdx.x+threadIdx.x; if(index >= size) return; int temp; if(index > 0) { temp = d_bins[index - 1]; } else { temp = 0; } __syncthreads(); d_bins[index] = temp; __syncthreads(); int val = 0; for(int s=1; s<=size; s*=2) { int a = index-s; val = 0; if(a>=0) val = d_bins[a]; __syncthreads(); if(a>=0) d_bins[index] += val; __syncthreads(); } } __global__ void kernel_histo(const float* d_in, int* d_bins, float min,float max,int size, int numBins) { int index = blockDim.x*blockIdx.x+threadIdx.x; if(index<size) { int a = ((d_in[index] - min)/(max-min))* numBins; atomicAdd(&d_bins[a], 1); } } __global__ void kernel_maxmin(float* d_in, float*d_out, int size, int maxmin) { int tid = threadIdx.x; int x = blockDim.x * blockIdx.x + threadIdx.x; extern __shared__ float shared[]; if(x>=size) return ; if(x<size) shared[tid] = d_in[x]; else { if(maxmin == 0) shared[tid] = FLT_MAX; else shared[tid] = -FLT_MAX; } __syncthreads(); for(int s=1; s<blockDim.x; s++) { if(tid % (2*s) == 0) { if(s+tid < blockDim.x) if(maxmin == 0) shared[tid] = min(shared[tid], shared[tid+s]); else shared[tid] = max(shared[tid], shared[tid+s]); } __syncthreads(); } __syncthreads(); if(tid == 0) d_out[blockIdx.x] = shared[0]; } __global__ void tonemap( float* d_x, float* d_y, float* d_log_Y, float* d_cdf_norm, float* d_r_new, float* d_g_new, float* d_b_new, float min_log_Y, float max_log_Y, float log_Y_range, int num_bins, int num_pixels_y, int num_pixels_x ) { int ny = num_pixels_y; int nx = num_pixels_x; int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y ); int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x; if ( image_index_2d.x < nx && image_index_2d.y < ny ) { float x = d_x[ image_index_1d ]; float y = d_y[ image_index_1d ]; 
float log_Y = d_log_Y[ image_index_1d ]; int bin_index = min( num_bins - 1, int( (num_bins * ( log_Y - min_log_Y ) ) / log_Y_range ) ); float Y_new = d_cdf_norm[ bin_index ]; float X_new = x * ( Y_new / y ); float Z_new = ( 1 - x - y ) * ( Y_new / y ); float r_new = ( X_new * 3.2406f ) + ( Y_new * -1.5372f ) + ( Z_new * -0.4986f ); float g_new = ( X_new * -0.9689f ) + ( Y_new * 1.8758f ) + ( Z_new * 0.0415f ); float b_new = ( X_new * 0.0557f ) + ( Y_new * -0.2040f ) + ( Z_new * 1.0570f ); d_r_new[ image_index_1d ] = r_new; d_g_new[ image_index_1d ] = g_new; d_b_new[ image_index_1d ] = b_new; } } __global__ void recombineChannels(const float* const redChannel, const float* const greenChannel, const float* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } __global__ void normalize_cdf( unsigned int* d_input_cdf, float* d_output_cdf, int n ) { const float normalization_constant = 1.f / d_input_cdf[n - 1]; int global_index_1d = ( blockIdx.x * blockDim.x ) + threadIdx.x; if ( global_index_1d < n ) { unsigned int input_value = d_input_cdf[ global_index_1d ]; float output_value = input_value * normalization_constant; d_output_cdf[ global_index_1d ] = output_value; } } }
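Two things are worth flagging in kernel_maxmin (present in both versions above). First, the early return for out-of-range threads happens before the __syncthreads() calls, so in the final partial block the surviving threads wait at barriers that some lanes never reach; that is undefined behavior, and it also makes the padding else-branch just below the return unreachable. Second, the stride grows as s++ instead of doubling, so thread 0 folds in every shared[] slot one at a time: the result is still correct for full blocks, but the reduction takes O(blockDim) barrier rounds rather than O(log blockDim). A conventional sketch of the min variant, assuming blockDim.x is a power of two:

#include <cfloat>

// Block-level min reduction: pad out-of-range lanes instead of returning early.
__global__ void block_min(const float *d_in, float *d_out, int size) {
    extern __shared__ float sh[];
    int tid = threadIdx.x;
    int x = blockIdx.x * blockDim.x + tid;
    sh[tid] = (x < size) ? d_in[x] : FLT_MAX;      // identity element for min
    __syncthreads();
    for (int s = blockDim.x / 2; s > 0; s >>= 1) { // halving stride: log2(blockDim) rounds
        if (tid < s)
            sh[tid] = min(sh[tid], sh[tid + s]);
        __syncthreads();
    }
    if (tid == 0)
        d_out[blockIdx.x] = sh[0];
}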